From e58e854a328aeac9a8876476be21918c21707e64 Mon Sep 17 00:00:00 2001 From: Tsvetomir Dimitrov Date: Wed, 20 Mar 2024 08:55:58 +0200 Subject: [PATCH 001/128] Expose `ClaimQueue` via a runtime api and use it in `collation-generation` (#3580) The PR adds two things: 1. Runtime API exposing the whole claim queue 2. Consumes the API in `collation-generation` to fetch the next scheduled `ParaEntry` for an occupied core. Related to https://github.com/paritytech/polkadot-sdk/issues/1797 --- Cargo.lock | 1 + .../src/blockchain_rpc_client.rs | 13 +- .../src/rpc_client.rs | 17 +- polkadot/node/collation-generation/Cargo.toml | 1 + polkadot/node/collation-generation/src/lib.rs | 74 +++- .../node/collation-generation/src/tests.rs | 329 +++++++++++++++++- polkadot/node/core/runtime-api/src/cache.rs | 29 +- polkadot/node/core/runtime-api/src/lib.rs | 11 + polkadot/node/core/runtime-api/src/tests.rs | 18 +- polkadot/node/subsystem-types/src/messages.rs | 10 +- .../subsystem-types/src/runtime_client.rs | 22 +- polkadot/node/subsystem-util/src/lib.rs | 8 +- polkadot/primitives/src/runtime_api.rs | 17 +- .../src/runtime_api_impl/vstaging.rs | 21 +- prdoc/pr_3580.prdoc | 13 + 15 files changed, 532 insertions(+), 52 deletions(-) create mode 100644 prdoc/pr_3580.prdoc diff --git a/Cargo.lock b/Cargo.lock index 5c0066e9728..ee813b60218 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12396,6 +12396,7 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", + "rstest", "sp-core", "sp-keyring", "sp-maybe-compressed-blob", diff --git a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs index ab56b62c4ca..8d8a2920b4e 100644 --- a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs +++ b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs @@ -14,7 +14,10 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . -use std::pin::Pin; +use std::{ + collections::{BTreeMap, VecDeque}, + pin::Pin, +}; use cumulus_relay_chain_interface::{RelayChainError, RelayChainResult}; use cumulus_relay_chain_rpc_interface::RelayChainRpcClient; @@ -25,6 +28,7 @@ use polkadot_primitives::{ async_backing::{AsyncBackingParams, BackingState}, slashing, vstaging::{ApprovalVotingParams, NodeFeatures}, + CoreIndex, }; use sc_authority_discovery::{AuthorityDiscovery, Error as AuthorityDiscoveryError}; use sc_client_api::AuxStore; @@ -442,6 +446,13 @@ impl RuntimeApiSubsystemClient for BlockChainRpcClient { async fn node_features(&self, at: Hash) -> Result { Ok(self.rpc_client.parachain_host_node_features(at).await?) } + + async fn claim_queue( + &self, + at: Hash, + ) -> Result>, ApiError> { + Ok(self.rpc_client.parachain_host_claim_queue(at).await?) 
+ } } #[async_trait::async_trait] diff --git a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs index 6578210a259..8cf5ccf0c70 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -24,6 +24,7 @@ use jsonrpsee::{ }; use serde::de::DeserializeOwned; use serde_json::Value as JsonValue; +use std::collections::VecDeque; use tokio::sync::mpsc::Sender as TokioSender; use parity_scale_codec::{Decode, Encode}; @@ -34,10 +35,10 @@ use cumulus_primitives_core::{ slashing, vstaging::{ApprovalVotingParams, NodeFeatures}, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, - Hash as RelayHash, Header as RelayHeader, InboundHrmpMessage, OccupiedCoreAssumption, - PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, - ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, + CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, ExecutorParams, + GroupRotationInfo, Hash as RelayHash, Header as RelayHeader, InboundHrmpMessage, + OccupiedCoreAssumption, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, + ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }, InboundDownwardMessage, ParaId, PersistedValidationData, }; @@ -647,6 +648,14 @@ impl RelayChainRpcClient { .await } + pub async fn parachain_host_claim_queue( + &self, + at: RelayHash, + ) -> Result>, RelayChainError> { + self.call_remote_runtime_function("ParachainHost_claim_queue", at, None::<()>) + .await + } + pub async fn validation_code_hash( &self, at: RelayHash, diff --git a/polkadot/node/collation-generation/Cargo.toml b/polkadot/node/collation-generation/Cargo.toml index 8df0c2b1eda..f72af87c15e 100644 --- a/polkadot/node/collation-generation/Cargo.toml +++ b/polkadot/node/collation-generation/Cargo.toml @@ -26,4 +26,5 @@ parity-scale-codec = { version = "3.6.1", default-features = false, features = [ polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../primitives/test-helpers" } assert_matches = "1.4.0" +rstest = "0.18.2" sp-keyring = { path = "../../../substrate/primitives/keyring" } diff --git a/polkadot/node/collation-generation/src/lib.rs b/polkadot/node/collation-generation/src/lib.rs index a89351628a0..3b1a8f5ff23 100644 --- a/polkadot/node/collation-generation/src/lib.rs +++ b/polkadot/node/collation-generation/src/lib.rs @@ -38,21 +38,25 @@ use polkadot_node_primitives::{ SubmitCollationParams, }; use polkadot_node_subsystem::{ - messages::{CollationGenerationMessage, CollatorProtocolMessage}, + messages::{CollationGenerationMessage, CollatorProtocolMessage, RuntimeApiRequest}, overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, RuntimeApiError, SpawnedSubsystem, SubsystemContext, SubsystemError, SubsystemResult, }; use polkadot_node_subsystem_util::{ - request_async_backing_params, request_availability_cores, request_persisted_validation_data, - request_validation_code, request_validation_code_hash, request_validators, + has_required_runtime, request_async_backing_params, request_availability_cores, + request_claim_queue, request_persisted_validation_data, request_validation_code, + request_validation_code_hash, request_validators, }; use polkadot_primitives::{ 
collator_signature_payload, CandidateCommitments, CandidateDescriptor, CandidateReceipt, - CollatorPair, CoreState, Hash, Id as ParaId, OccupiedCoreAssumption, PersistedValidationData, - ValidationCodeHash, + CollatorPair, CoreIndex, CoreState, Hash, Id as ParaId, OccupiedCoreAssumption, + PersistedValidationData, ScheduledCore, ValidationCodeHash, }; use sp_core::crypto::Pair; -use std::sync::Arc; +use std::{ + collections::{BTreeMap, VecDeque}, + sync::Arc, +}; mod error; @@ -223,6 +227,7 @@ async fn handle_new_activations( let availability_cores = availability_cores??; let n_validators = validators??.len(); let async_backing_params = async_backing_params?.ok(); + let maybe_claim_queue = fetch_claim_queue(ctx.sender(), relay_parent).await?; for (core_idx, core) in availability_cores.into_iter().enumerate() { let _availability_core_timer = metrics.time_new_activations_availability_core(); @@ -239,10 +244,25 @@ async fn handle_new_activations( // TODO [now]: this assumes that next up == current. // in practice we should only set `OccupiedCoreAssumption::Included` // when the candidate occupying the core is also of the same para. - if let Some(scheduled) = occupied_core.next_up_on_available { - (scheduled, OccupiedCoreAssumption::Included) - } else { - continue + let res = match maybe_claim_queue { + Some(ref claim_queue) => { + // read what's in the claim queue for this core + fetch_next_scheduled_on_core( + claim_queue, + CoreIndex(core_idx as u32), + ) + }, + None => { + // Runtime doesn't support claim queue runtime api. Fallback to + // `next_up_on_available` + occupied_core.next_up_on_available + }, + } + .map(|scheduled| (scheduled, OccupiedCoreAssumption::Included)); + + match res { + Some(res) => res, + None => continue, } }, _ => { @@ -600,3 +620,37 @@ fn erasure_root( let chunks = polkadot_erasure_coding::obtain_chunks_v1(n_validators, &available_data)?; Ok(polkadot_erasure_coding::branches(&chunks).root()) } + +// Checks if the runtime supports `request_claim_queue` and executes it. Returns `Ok(None)` +// otherwise. Any [`RuntimeApiError`]s are bubbled up to the caller. +async fn fetch_claim_queue( + sender: &mut impl overseer::CollationGenerationSenderTrait, + relay_parent: Hash, +) -> crate::error::Result>>> { + if has_required_runtime( + sender, + relay_parent, + RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT, + ) + .await + { + let res = request_claim_queue(relay_parent, sender).await.await??; + Ok(Some(res)) + } else { + gum::trace!(target: LOG_TARGET, "Runtime doesn't support `request_claim_queue`"); + Ok(None) + } +} + +// Returns the next scheduled `ParaId` for a core in the claim queue, wrapped in `ScheduledCore`. +// This function is supposed to be used in `handle_new_activations` hence the return type. +fn fetch_next_scheduled_on_core( + claim_queue: &BTreeMap>, + core_idx: CoreIndex, +) -> Option { + claim_queue + .get(&core_idx)? 
+ .front() + .cloned() + .map(|para_id| ScheduledCore { para_id, collator: None }) +} diff --git a/polkadot/node/collation-generation/src/tests.rs b/polkadot/node/collation-generation/src/tests.rs index eb0ede6ef6b..9b16980e6af 100644 --- a/polkadot/node/collation-generation/src/tests.rs +++ b/polkadot/node/collation-generation/src/tests.rs @@ -25,15 +25,18 @@ use polkadot_node_primitives::{BlockData, Collation, CollationResult, MaybeCompr use polkadot_node_subsystem::{ errors::RuntimeApiError, messages::{AllMessages, RuntimeApiMessage, RuntimeApiRequest}, + ActivatedLeaf, }; use polkadot_node_subsystem_test_helpers::{subsystem_test_harness, TestSubsystemContextHandle}; use polkadot_node_subsystem_util::TimeoutExt; use polkadot_primitives::{ - CollatorPair, HeadData, Id as ParaId, PersistedValidationData, ScheduledCore, ValidationCode, + AsyncBackingParams, CollatorPair, HeadData, Id as ParaId, Id, PersistedValidationData, + ScheduledCore, ValidationCode, }; +use rstest::rstest; use sp_keyring::sr25519::Keyring as Sr25519Keyring; use std::pin::Pin; -use test_helpers::{dummy_hash, dummy_head_data, dummy_validator}; +use test_helpers::{dummy_candidate_descriptor, dummy_hash, dummy_head_data, dummy_validator}; type VirtualOverseer = TestSubsystemContextHandle; @@ -132,8 +135,10 @@ fn scheduled_core_for>(para_id: Id) -> ScheduledCore { ScheduledCore { para_id: para_id.into(), collator: None } } -#[test] -fn requests_availability_per_relay_parent() { +#[rstest] +#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT - 1)] +#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)] +fn requests_availability_per_relay_parent(#[case] runtime_version: u32) { let activated_hashes: Vec = vec![[1; 32].into(), [4; 32].into(), [9; 32].into(), [16; 32].into()]; @@ -159,6 +164,18 @@ fn requests_availability_per_relay_parent() { ))) => { tx.send(Err(RuntimeApiError::NotSupported { runtime_api_name: "doesnt_matter" })).unwrap(); }, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _hash, + RuntimeApiRequest::Version(tx), + ))) => { + tx.send(Ok(runtime_version)).unwrap(); + }, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _hash, + RuntimeApiRequest::ClaimQueue(tx), + ))) if runtime_version >= RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT => { + tx.send(Ok(BTreeMap::new())).unwrap(); + }, Some(msg) => panic!("didn't expect any other overseer requests given no availability cores; got {:?}", msg), } } @@ -184,8 +201,10 @@ fn requests_availability_per_relay_parent() { assert_eq!(requested_availability_cores, activated_hashes); } -#[test] -fn requests_validation_data_for_scheduled_matches() { +#[rstest] +#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT - 1)] +#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)] +fn requests_validation_data_for_scheduled_matches(#[case] runtime_version: u32) { let activated_hashes: Vec = vec![ Hash::repeat_byte(1), Hash::repeat_byte(4), @@ -242,6 +261,18 @@ fn requests_validation_data_for_scheduled_matches() { })) .unwrap(); }, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _hash, + RuntimeApiRequest::Version(tx), + ))) => { + tx.send(Ok(runtime_version)).unwrap(); + }, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _hash, + RuntimeApiRequest::ClaimQueue(tx), + ))) if runtime_version >= RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT => { + tx.send(Ok(BTreeMap::new())).unwrap(); + }, Some(msg) => { panic!("didn't expect any other overseer requests; got {:?}", msg) }, @@ -271,8 +302,10 @@ fn 
requests_validation_data_for_scheduled_matches() { assert_eq!(requested_validation_data, vec![[4; 32].into()]); } -#[test] -fn sends_distribute_collation_message() { +#[rstest] +#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT - 1)] +#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)] +fn sends_distribute_collation_message(#[case] runtime_version: u32) { let activated_hashes: Vec = vec![ Hash::repeat_byte(1), Hash::repeat_byte(4), @@ -339,6 +372,18 @@ fn sends_distribute_collation_message() { })) .unwrap(); }, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _hash, + RuntimeApiRequest::Version(tx), + ))) => { + tx.send(Ok(runtime_version)).unwrap(); + }, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _hash, + RuntimeApiRequest::ClaimQueue(tx), + ))) if runtime_version >= RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT => { + tx.send(Ok(BTreeMap::new())).unwrap(); + }, Some(msg @ AllMessages::CollatorProtocol(_)) => { inner_to_collator_protocol.lock().await.push(msg); }, @@ -423,8 +468,10 @@ fn sends_distribute_collation_message() { } } -#[test] -fn fallback_when_no_validation_code_hash_api() { +#[rstest] +#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT - 1)] +#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)] +fn fallback_when_no_validation_code_hash_api(#[case] runtime_version: u32) { // This is a variant of the above test, but with the validation code hash API disabled. let activated_hashes: Vec = vec![ @@ -501,9 +548,22 @@ fn fallback_when_no_validation_code_hash_api() { })) .unwrap(); }, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _hash, + RuntimeApiRequest::Version(tx), + ))) => { + tx.send(Ok(runtime_version)).unwrap(); + }, Some(msg @ AllMessages::CollatorProtocol(_)) => { inner_to_collator_protocol.lock().await.push(msg); }, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _hash, + RuntimeApiRequest::ClaimQueue(tx), + ))) if runtime_version >= RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT => { + let res = BTreeMap::>::new(); + tx.send(Ok(res)).unwrap(); + }, Some(msg) => { panic!("didn't expect any other overseer requests; got {:?}", msg) }, @@ -635,3 +695,252 @@ fn submit_collation_leads_to_distribution() { virtual_overseer }); } + +// There is one core in `Occupied` state and async backing is enabled. On new head activation +// `CollationGeneration` should produce and distribute a new collation. +#[rstest] +#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT - 1)] +#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)] +fn distribute_collation_for_occupied_core_with_async_backing_enabled(#[case] runtime_version: u32) { + let activated_hash: Hash = [1; 32].into(); + let para_id = ParaId::from(5); + + // One core, in occupied state. The data in `CoreState` and `ClaimQueue` should match. 
+ let cores: Vec = vec![CoreState::Occupied(polkadot_primitives::OccupiedCore { + next_up_on_available: Some(ScheduledCore { para_id, collator: None }), + occupied_since: 1, + time_out_at: 10, + next_up_on_time_out: Some(ScheduledCore { para_id, collator: None }), + availability: Default::default(), // doesn't matter + group_responsible: polkadot_primitives::GroupIndex(0), + candidate_hash: Default::default(), + candidate_descriptor: dummy_candidate_descriptor(dummy_hash()), + })]; + let claim_queue = BTreeMap::from([(CoreIndex::from(0), VecDeque::from([para_id]))]); + + test_harness(|mut virtual_overseer| async move { + helpers::initialize_collator(&mut virtual_overseer, para_id).await; + helpers::activate_new_head(&mut virtual_overseer, activated_hash).await; + helpers::handle_runtime_calls_on_new_head_activation( + &mut virtual_overseer, + activated_hash, + AsyncBackingParams { max_candidate_depth: 1, allowed_ancestry_len: 1 }, + cores, + runtime_version, + claim_queue, + ) + .await; + helpers::handle_core_processing_for_a_leaf( + &mut virtual_overseer, + activated_hash, + para_id, + // `CoreState` is `Occupied` => `OccupiedCoreAssumption` is `Included` + OccupiedCoreAssumption::Included, + ) + .await; + + virtual_overseer + }); +} + +// There is one core in `Occupied` state and async backing is disabled. On new head activation +// no new collation should be generated. +#[rstest] +#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT - 1)] +#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)] +fn no_collation_is_distributed_for_occupied_core_with_async_backing_disabled( + #[case] runtime_version: u32, +) { + let activated_hash: Hash = [1; 32].into(); + let para_id = ParaId::from(5); + + // One core, in occupied state. The data in `CoreState` and `ClaimQueue` should match. + let cores: Vec = vec![CoreState::Occupied(polkadot_primitives::OccupiedCore { + next_up_on_available: Some(ScheduledCore { para_id, collator: None }), + occupied_since: 1, + time_out_at: 10, + next_up_on_time_out: Some(ScheduledCore { para_id, collator: None }), + availability: Default::default(), // doesn't matter + group_responsible: polkadot_primitives::GroupIndex(0), + candidate_hash: Default::default(), + candidate_descriptor: dummy_candidate_descriptor(dummy_hash()), + })]; + let claim_queue = BTreeMap::from([(CoreIndex::from(0), VecDeque::from([para_id]))]); + + test_harness(|mut virtual_overseer| async move { + helpers::initialize_collator(&mut virtual_overseer, para_id).await; + helpers::activate_new_head(&mut virtual_overseer, activated_hash).await; + helpers::handle_runtime_calls_on_new_head_activation( + &mut virtual_overseer, + activated_hash, + AsyncBackingParams { max_candidate_depth: 0, allowed_ancestry_len: 0 }, + cores, + runtime_version, + claim_queue, + ) + .await; + + virtual_overseer + }); +} + +mod helpers { + use super::*; + + // Sends `Initialize` with a collator config + pub async fn initialize_collator(virtual_overseer: &mut VirtualOverseer, para_id: ParaId) { + virtual_overseer + .send(FromOrchestra::Communication { + msg: CollationGenerationMessage::Initialize(test_config(para_id)), + }) + .await; + } + + // Sends `ActiveLeaves` for a single leaf with the specified hash. Block number is hardcoded. 
+ pub async fn activate_new_head(virtual_overseer: &mut VirtualOverseer, activated_hash: Hash) { + virtual_overseer + .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate { + activated: Some(ActivatedLeaf { + hash: activated_hash, + number: 10, + unpin_handle: polkadot_node_subsystem_test_helpers::mock::dummy_unpin_handle( + activated_hash, + ), + span: Arc::new(overseer::jaeger::Span::Disabled), + }), + ..Default::default() + }))) + .await; + } + + // Handle all runtime calls performed in `handle_new_activations`. Conditionally expects a + // `CLAIM_QUEUE_RUNTIME_REQUIREMENT` call if the passed `runtime_version` is greater or equal to + // `CLAIM_QUEUE_RUNTIME_REQUIREMENT` + pub async fn handle_runtime_calls_on_new_head_activation( + virtual_overseer: &mut VirtualOverseer, + activated_hash: Hash, + async_backing_params: AsyncBackingParams, + cores: Vec, + runtime_version: u32, + claim_queue: BTreeMap>, + ) { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(hash, RuntimeApiRequest::AvailabilityCores(tx))) => { + assert_eq!(hash, activated_hash); + let _ = tx.send(Ok(cores)); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(hash, RuntimeApiRequest::Validators(tx))) => { + assert_eq!(hash, activated_hash); + let _ = tx.send(Ok(vec![ + Sr25519Keyring::Alice.public().into(), + Sr25519Keyring::Bob.public().into(), + Sr25519Keyring::Charlie.public().into(), + ])); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + hash, + RuntimeApiRequest::AsyncBackingParams( + tx, + ), + )) => { + assert_eq!(hash, activated_hash); + let _ = tx.send(Ok(async_backing_params)); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + hash, + RuntimeApiRequest::Version(tx), + )) => { + assert_eq!(hash, activated_hash); + let _ = tx.send(Ok(runtime_version)); + } + ); + + if runtime_version == RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + hash, + RuntimeApiRequest::ClaimQueue(tx), + )) => { + assert_eq!(hash, activated_hash); + let _ = tx.send(Ok(claim_queue)); + } + ); + } + } + + // Handles all runtime requests performed in `handle_new_activations` for the case when a + // collation should be prepared for the new leaf + pub async fn handle_core_processing_for_a_leaf( + virtual_overseer: &mut VirtualOverseer, + activated_hash: Hash, + para_id: ParaId, + expected_occupied_core_assumption: OccupiedCoreAssumption, + ) { + // Some hardcoded data - if needed, extract to parameters + let validation_code_hash = ValidationCodeHash::from(Hash::repeat_byte(42)); + let parent_head = HeadData::from(vec![1, 2, 3]); + let pvd = PersistedValidationData { + parent_head: parent_head.clone(), + relay_parent_number: 10, + relay_parent_storage_root: Hash::repeat_byte(1), + max_pov_size: 1024, + }; + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(hash, RuntimeApiRequest::PersistedValidationData(id, a, tx))) => { + assert_eq!(hash, activated_hash); + assert_eq!(id, para_id); + assert_eq!(a, expected_occupied_core_assumption); + + let _ = tx.send(Ok(Some(pvd.clone()))); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + 
AllMessages::RuntimeApi(RuntimeApiMessage::Request( + hash, + RuntimeApiRequest::ValidationCodeHash( + id, + assumption, + tx, + ), + )) => { + assert_eq!(hash, activated_hash); + assert_eq!(id, para_id); + assert_eq!(assumption, expected_occupied_core_assumption); + + let _ = tx.send(Ok(Some(validation_code_hash))); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::CollatorProtocol(CollatorProtocolMessage::DistributeCollation{ + candidate_receipt, + parent_head_data_hash, + .. + }) => { + assert_eq!(parent_head_data_hash, parent_head.hash()); + assert_eq!(candidate_receipt.descriptor().persisted_validation_data_hash, pvd.hash()); + assert_eq!(candidate_receipt.descriptor().para_head, dummy_head_data().hash()); + assert_eq!(candidate_receipt.descriptor().validation_code_hash, validation_code_hash); + } + ); + } +} diff --git a/polkadot/node/core/runtime-api/src/cache.rs b/polkadot/node/core/runtime-api/src/cache.rs index 5eca551db0a..9674cda9838 100644 --- a/polkadot/node/core/runtime-api/src/cache.rs +++ b/polkadot/node/core/runtime-api/src/cache.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use std::collections::btree_map::BTreeMap; +use std::collections::{btree_map::BTreeMap, VecDeque}; use schnellru::{ByLength, LruMap}; use sp_consensus_babe::Epoch; @@ -23,10 +23,11 @@ use polkadot_primitives::{ async_backing, slashing, vstaging::{self, ApprovalVotingParams}, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, - Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, - PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, - ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, + CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, ExecutorParams, + GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, + OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, + SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, + ValidatorSignature, }; /// For consistency we have the same capacity for all caches. 
We use 128 as we'll only need that @@ -70,6 +71,7 @@ pub(crate) struct RequestResultCache { async_backing_params: LruMap, node_features: LruMap, approval_voting_params: LruMap, + claim_queue: LruMap>>, } impl Default for RequestResultCache { @@ -105,6 +107,7 @@ impl Default for RequestResultCache { para_backing_state: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), async_backing_params: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), node_features: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), + claim_queue: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), } } } @@ -525,6 +528,21 @@ impl RequestResultCache { ) { self.approval_voting_params.insert(session_index, value); } + + pub(crate) fn claim_queue( + &mut self, + relay_parent: &Hash, + ) -> Option<&BTreeMap>> { + self.claim_queue.get(relay_parent).map(|v| &*v) + } + + pub(crate) fn cache_claim_queue( + &mut self, + relay_parent: Hash, + value: BTreeMap>, + ) { + self.claim_queue.insert(relay_parent, value); + } } pub(crate) enum RequestResult { @@ -577,4 +595,5 @@ pub(crate) enum RequestResult { ParaBackingState(Hash, ParaId, Option), AsyncBackingParams(Hash, async_backing::AsyncBackingParams), NodeFeatures(SessionIndex, vstaging::NodeFeatures), + ClaimQueue(Hash, BTreeMap>), } diff --git a/polkadot/node/core/runtime-api/src/lib.rs b/polkadot/node/core/runtime-api/src/lib.rs index 3ff1a35d068..2b7f6fc2d60 100644 --- a/polkadot/node/core/runtime-api/src/lib.rs +++ b/polkadot/node/core/runtime-api/src/lib.rs @@ -177,6 +177,9 @@ where self.requests_cache.cache_async_backing_params(relay_parent, params), NodeFeatures(session_index, params) => self.requests_cache.cache_node_features(session_index, params), + ClaimQueue(relay_parent, sender) => { + self.requests_cache.cache_claim_queue(relay_parent, sender); + }, } } @@ -329,6 +332,8 @@ where Some(Request::NodeFeatures(index, sender)) } }, + Request::ClaimQueue(sender) => + query!(claim_queue(), sender).map(|sender| Request::ClaimQueue(sender)), } } @@ -626,5 +631,11 @@ where sender, result = (index) ), + Request::ClaimQueue(sender) => query!( + ClaimQueue, + claim_queue(), + ver = Request::CLAIM_QUEUE_RUNTIME_REQUIREMENT, + sender + ), } } diff --git a/polkadot/node/core/runtime-api/src/tests.rs b/polkadot/node/core/runtime-api/src/tests.rs index f91723b3d39..fefd2d3f862 100644 --- a/polkadot/node/core/runtime-api/src/tests.rs +++ b/polkadot/node/core/runtime-api/src/tests.rs @@ -23,15 +23,16 @@ use polkadot_primitives::{ async_backing, slashing, vstaging::{ApprovalVotingParams, NodeFeatures}, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, - Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, - PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, - Slot, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, + CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, ExecutorParams, + GroupRotationInfo, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, + OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, + SessionIndex, SessionInfo, Slot, ValidationCode, ValidationCodeHash, ValidatorId, + ValidatorIndex, ValidatorSignature, }; use sp_api::ApiError; use sp_core::testing::TaskExecutor; use std::{ - collections::{BTreeMap, HashMap}, + collections::{BTreeMap, HashMap, VecDeque}, sync::{Arc, Mutex}, }; use 
test_helpers::{dummy_committed_candidate_receipt, dummy_validation_code}; @@ -286,6 +287,13 @@ impl RuntimeApiSubsystemClient for MockSubsystemClient { async fn disabled_validators(&self, _: Hash) -> Result, ApiError> { todo!("Not required for tests") } + + async fn claim_queue( + &self, + _: Hash, + ) -> Result>, ApiError> { + todo!("Not required for tests") + } } #[test] diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index 23773f7e325..5115efa853c 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -45,7 +45,7 @@ use polkadot_primitives::{ async_backing, slashing, vstaging::{ApprovalVotingParams, NodeFeatures}, AuthorityDiscoveryId, BackedCandidate, BlockNumber, CandidateEvent, CandidateHash, - CandidateIndex, CandidateReceipt, CollatorId, CommittedCandidateReceipt, CoreState, + CandidateIndex, CandidateReceipt, CollatorId, CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, HeadData, Header as BlockHeader, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, MultiDisputeStatementSet, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, @@ -55,7 +55,7 @@ use polkadot_primitives::{ }; use polkadot_statement_table::v2::Misbehavior; use std::{ - collections::{BTreeMap, HashMap, HashSet}, + collections::{BTreeMap, HashMap, HashSet, VecDeque}, sync::Arc, }; @@ -729,6 +729,9 @@ pub enum RuntimeApiRequest { /// Approval voting params /// `V10` ApprovalVotingParams(SessionIndex, RuntimeApiSender), + /// Fetch the `ClaimQueue` from scheduler pallet + /// `V11` + ClaimQueue(RuntimeApiSender>>), } impl RuntimeApiRequest { @@ -763,6 +766,9 @@ impl RuntimeApiRequest { /// `approval_voting_params` pub const APPROVAL_VOTING_PARAMS_REQUIREMENT: u32 = 10; + + /// `ClaimQueue` + pub const CLAIM_QUEUE_RUNTIME_REQUIREMENT: u32 = 11; } /// A message to the Runtime API subsystem. 
diff --git a/polkadot/node/subsystem-types/src/runtime_client.rs b/polkadot/node/subsystem-types/src/runtime_client.rs index 4039fc9127d..7474b4120cc 100644 --- a/polkadot/node/subsystem-types/src/runtime_client.rs +++ b/polkadot/node/subsystem-types/src/runtime_client.rs @@ -21,10 +21,11 @@ use polkadot_primitives::{ slashing, vstaging::{self, ApprovalVotingParams}, Block, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, - Header, Id, InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, - PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, - ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, + CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, ExecutorParams, + GroupRotationInfo, Hash, Header, Id, InboundDownwardMessage, InboundHrmpMessage, + OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, + SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, + ValidatorSignature, }; use sc_client_api::{AuxStore, HeaderBackend}; use sc_transaction_pool_api::OffchainTransactionPoolFactory; @@ -33,7 +34,10 @@ use sp_authority_discovery::AuthorityDiscoveryApi; use sp_blockchain::{BlockStatus, Info}; use sp_consensus_babe::{BabeApi, Epoch}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; -use std::{collections::BTreeMap, sync::Arc}; +use std::{ + collections::{BTreeMap, VecDeque}, + sync::Arc, +}; /// Offers header utilities. /// @@ -329,6 +333,10 @@ pub trait RuntimeApiSubsystemClient { at: Hash, session_index: SessionIndex, ) -> Result; + + // == v11: Claim queue == + /// Fetch the `ClaimQueue` from scheduler pallet + async fn claim_queue(&self, at: Hash) -> Result>, ApiError>; } /// Default implementation of [`RuntimeApiSubsystemClient`] using the client. @@ -594,6 +602,10 @@ where ) -> Result { self.client.runtime_api().approval_voting_params(at) } + + async fn claim_queue(&self, at: Hash) -> Result>, ApiError> { + self.client.runtime_api().claim_queue(at) + } } impl HeaderBackend for DefaultSubsystemClient diff --git a/polkadot/node/subsystem-util/src/lib.rs b/polkadot/node/subsystem-util/src/lib.rs index f13beb3502f..aaae30db50c 100644 --- a/polkadot/node/subsystem-util/src/lib.rs +++ b/polkadot/node/subsystem-util/src/lib.rs @@ -30,7 +30,7 @@ use polkadot_node_subsystem::{ messages::{RuntimeApiMessage, RuntimeApiRequest, RuntimeApiSender}, overseer, SubsystemSender, }; -use polkadot_primitives::{slashing, ExecutorParams}; +use polkadot_primitives::{slashing, CoreIndex, ExecutorParams}; pub use overseer::{ gen::{OrchestraError as OverseerError, Timeout}, @@ -53,7 +53,10 @@ pub use rand; use sp_application_crypto::AppCrypto; use sp_core::ByteArray; use sp_keystore::{Error as KeystoreError, KeystorePtr}; -use std::time::Duration; +use std::{ + collections::{BTreeMap, VecDeque}, + time::Duration, +}; use thiserror::Error; use vstaging::get_disabled_validators_with_fallback; @@ -304,6 +307,7 @@ specialize_requests! 
{ fn request_submit_report_dispute_lost(dp: slashing::DisputeProof, okop: slashing::OpaqueKeyOwnershipProof) -> Option<()>; SubmitReportDisputeLost; fn request_disabled_validators() -> Vec; DisabledValidators; fn request_async_backing_params() -> AsyncBackingParams; AsyncBackingParams; + fn request_claim_queue() -> BTreeMap>; ClaimQueue; } /// Requests executor parameters from the runtime effective at given relay-parent. First obtains diff --git a/polkadot/primitives/src/runtime_api.rs b/polkadot/primitives/src/runtime_api.rs index d661005e32f..6dca33f8823 100644 --- a/polkadot/primitives/src/runtime_api.rs +++ b/polkadot/primitives/src/runtime_api.rs @@ -117,14 +117,18 @@ use crate::{ async_backing, slashing, vstaging::{self, ApprovalVotingParams}, AsyncBackingParams, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, - OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, - SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, ValidatorSignature, + CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, ExecutorParams, + GroupRotationInfo, Hash, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, + ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, + ValidatorSignature, }; use polkadot_core_primitives as pcp; use polkadot_parachain_primitives::primitives as ppp; -use sp_std::{collections::btree_map::BTreeMap, prelude::*}; +use sp_std::{ + collections::{btree_map::BTreeMap, vec_deque::VecDeque}, + prelude::*, +}; sp_api::decl_runtime_apis! { /// The API for querying the state of parachains on-chain. @@ -281,5 +285,10 @@ sp_api::decl_runtime_apis! { /// Approval voting configuration parameters #[api_version(10)] fn approval_voting_params() -> ApprovalVotingParams; + + /***** Added in v11 *****/ + /// Claim queue + #[api_version(11)] + fn claim_queue() -> BTreeMap>; } } diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs index 1fee1a4097d..296b872e8d4 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs @@ -16,12 +16,15 @@ //! Put implementations of functions from staging APIs here. 
-use crate::{configuration, initializer, shared}; +use crate::{configuration, initializer, scheduler, shared}; use primitives::{ vstaging::{ApprovalVotingParams, NodeFeatures}, - ValidatorIndex, + CoreIndex, Id as ParaId, ValidatorIndex, +}; +use sp_std::{ + collections::{btree_map::BTreeMap, vec_deque::VecDeque}, + prelude::Vec, }; -use sp_std::prelude::Vec; /// Implementation for `DisabledValidators` // CAVEAT: this should only be called on the node side @@ -38,8 +41,18 @@ pub fn node_features() -> NodeFeatures { >::config().node_features } -/// Approval voting subsystem configuration parameteres +/// Approval voting subsystem configuration parameters pub fn approval_voting_params() -> ApprovalVotingParams { let config = >::config(); config.approval_voting_params } + +/// Returns the claimqueue from the scheduler +pub fn claim_queue() -> BTreeMap> { + >::claimqueue() + .into_iter() + .map(|(core_index, entries)| { + (core_index, entries.into_iter().map(|e| e.para_id()).collect()) + }) + .collect() +} diff --git a/prdoc/pr_3580.prdoc b/prdoc/pr_3580.prdoc new file mode 100644 index 00000000000..042fcf7a1a8 --- /dev/null +++ b/prdoc/pr_3580.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Expose `ClaimQueue` via a runtime api and consume it in `collation-generation` + +doc: + - audience: Node Dev + description: | + Creates a new runtime api exposing the `ClaimQueue` from `scheduler` pallet. Consume the api + in collation generation (if available) by getting what's scheduled on a core from the + `ClaimQueue` instead of from `next_up_on_available` (from `AvailabilityCores` runtime api). + +crates: [ ] -- GitLab From bb973aa0550e59b35e8b427cebaf676f433be83a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 20 Mar 2024 07:03:41 +0000 Subject: [PATCH 002/128] Bump anyhow from 1.0.75 to 1.0.81 (#3752) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [anyhow](https://github.com/dtolnay/anyhow) from 1.0.75 to 1.0.81.
Release notes (sourced from anyhow's releases):

1.0.81
- Make backtrace support available when using -Dwarnings (#354)

1.0.80
- Fix unused_imports warnings when compiled by rustc 1.78

1.0.79
- Work around improperly cached build script result by sccache (#340)

1.0.78
- Reduce spurious rebuilds under RustRover IDE when using a nightly toolchain (#337)

1.0.77

1.0.76
- Opt in to unsafe_op_in_unsafe_fn lint (#329)

Commits:
- 4aad4ed Release 1.0.81
- 8be9091 Merge pull request #354 from dtolnay/deadcode
- a2eb7dd Make compatible with -Dwarnings
- 5443719 Release 1.0.80
- dfc7bc0 Work around prelude redundant import warnings
- 6e4f86b Import from alloc not std, where possible
- f885a13 Ignore incompatible_msrv clippy false positives in test
- fefbcbc Ignore incompatible_msrv clippy lint
- 78f2d81 Update ui test suite to nightly-2024-02-08
- edd88d3 Update ui test suite to nightly-2024-01-31
- Additional commits viewable in the compare view
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=anyhow&package-manager=cargo&previous-version=1.0.75&new-version=1.0.81)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- substrate/client/executor/wasmtime/Cargo.toml | 2 +- substrate/frame/contracts/fixtures/Cargo.toml | 4 ++-- substrate/primitives/wasm-interface/Cargo.toml | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ee813b60218..b055c79cbbf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -309,9 +309,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.75" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" +checksum = "0952808a6c2afd1aa8947271f3a60f1a6763c7b912d210184c5149b5cf147247" [[package]] name = "approx" diff --git a/substrate/client/executor/wasmtime/Cargo.toml b/substrate/client/executor/wasmtime/Cargo.toml index 75cc76a2354..f3fef404691 100644 --- a/substrate/client/executor/wasmtime/Cargo.toml +++ b/substrate/client/executor/wasmtime/Cargo.toml @@ -30,7 +30,7 @@ wasmtime = { version = "8.0.1", default-features = false, features = [ "parallel-compilation", "pooling-allocator", ] } -anyhow = "1.0.68" +anyhow = "1.0.81" sc-allocator = { path = "../../allocator" } sc-executor-common = { path = "../common" } sp-runtime-interface = { path = "../../../primitives/runtime-interface" } diff --git a/substrate/frame/contracts/fixtures/Cargo.toml b/substrate/frame/contracts/fixtures/Cargo.toml index 5ac140cd91b..8c93c6f16f6 100644 --- a/substrate/frame/contracts/fixtures/Cargo.toml +++ b/substrate/frame/contracts/fixtures/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] frame-system = { path = "../../system" } sp-runtime = { path = "../../../primitives/runtime" } -anyhow = "1.0.0" +anyhow = "1.0.81" [build-dependencies] parity-wasm = "0.45.0" @@ -21,7 +21,7 @@ tempfile = "3.8.1" toml = "0.8.2" twox-hash = "1.6.3" polkavm-linker = { workspace = true, optional = true } -anyhow = "1.0.0" +anyhow = "1.0.81" [features] riscv = ["polkavm-linker"] diff --git a/substrate/primitives/wasm-interface/Cargo.toml b/substrate/primitives/wasm-interface/Cargo.toml index 6c051b71c8e..c05cc05ff06 100644 --- a/substrate/primitives/wasm-interface/Cargo.toml +++ b/substrate/primitives/wasm-interface/Cargo.toml @@ -21,7 +21,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = impl-trait-for-tuples = "0.2.2" log = { optional = true, workspace = true, default-features = true } wasmtime = { version = "8.0.1", default-features = false, optional = true } -anyhow = { version = "1.0.68", optional = true } +anyhow = { version = "1.0.81", optional = true } [features] default = ["std"] -- GitLab From 7241a8db7b3496816503c6058dae67f66c666b00 Mon Sep 17 00:00:00 2001 From: slicejoke <163888128+slicejoke@users.noreply.github.com> Date: Wed, 20 Mar 2024 18:20:59 +0800 Subject: [PATCH 003/128] Fix typos (#3753) --- bridges/bin/runtime-common/src/priority_calculator.rs | 2 +- bridges/bin/runtime-common/src/refund_relayer_extension.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bridges/bin/runtime-common/src/priority_calculator.rs b/bridges/bin/runtime-common/src/priority_calculator.rs index a597fb9e2f4..c2737128e34 100644 --- a/bridges/bin/runtime-common/src/priority_calculator.rs +++ b/bridges/bin/runtime-common/src/priority_calculator.rs @@ -128,7 +128,7 @@ mod integrity_tests { Runtime::RuntimeCall: Dispatchable, BalanceOf: Send + Sync + FixedPointOperand, { - // 
esimate priority of transaction that delivers one message and has large tip + // estimate priority of transaction that delivers one message and has large tip let maximal_messages_in_delivery_transaction = Runtime::MaxUnconfirmedMessagesAtInboundLane::get(); let small_with_tip_priority = diff --git a/bridges/bin/runtime-common/src/refund_relayer_extension.rs b/bridges/bin/runtime-common/src/refund_relayer_extension.rs index bfcb82ad166..8e901d72821 100644 --- a/bridges/bin/runtime-common/src/refund_relayer_extension.rs +++ b/bridges/bin/runtime-common/src/refund_relayer_extension.rs @@ -16,7 +16,7 @@ //! Signed extension that refunds relayer if he has delivered some new messages. //! It also refunds transaction cost if the transaction is an `utility.batchAll()` -//! with calls that are: delivering new messsage and all necessary underlying headers +//! with calls that are: delivering new message and all necessary underlying headers //! (parachain or relay chain). use crate::messages_call_ext::{ -- GitLab From b686bfefba9c9f18261d8cc0ff1afc055645d436 Mon Sep 17 00:00:00 2001 From: bader y Date: Wed, 20 Mar 2024 09:26:59 -0400 Subject: [PATCH 004/128] Defensive Programming in Substrate Reference Document (#2615) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit _This PR is being continued from https://github.com/paritytech/polkadot-sdk/pull/2206, which was closed when the developer_hub was merged._ closes https://github.com/paritytech/polkadot-sdk-docs/issues/44 --- # Description This PR adds a reference document to the `developer-hub` crate (see https://github.com/paritytech/polkadot-sdk/pull/2102). This specific reference document covers defensive programming practices common within the context of developing a runtime with Substrate. 
In particular, this covers the following areas: - Default behavior of how Rust deals with numbers in general - How to deal with floating point numbers in runtime / fixed point arithmetic - How to deal with Integer overflows - General "safe math" / defensive programming practices for common pallet development scenarios - Defensive traits that exist within Substrate, i.e., `defensive_saturating_add `, `defensive_unwrap_or` - More general defensive programming examples (keep it concise) - Link to relevant examples where these practices are actually in production / being used - Unwrapping (or rather lack thereof) 101 todo -- - [x] Apply feedback from previous PR - [x] This may warrant a PR to append some of these docs to `sp_arithmetic` --------- Co-authored-by: Oliver Tale-Yazdi Co-authored-by: Gonçalo Pestana Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: Francisco Aguirre Co-authored-by: Radha <86818441+DrW3RK@users.noreply.github.com> --- Cargo.lock | 6 + docs/sdk/Cargo.toml | 7 + docs/sdk/src/guides/your_first_pallet/mod.rs | 6 +- .../reference_docs/defensive_programming.rs | 395 ++++++++++++++++++ docs/sdk/src/reference_docs/mod.rs | 3 +- .../safe_defensive_programming.rs | 1 - substrate/primitives/arithmetic/Cargo.toml | 3 + .../primitives/arithmetic/src/fixed_point.rs | 27 ++ substrate/primitives/arithmetic/src/lib.rs | 138 ++++-- .../primitives/arithmetic/src/per_things.rs | 36 ++ 10 files changed, 588 insertions(+), 34 deletions(-) create mode 100644 docs/sdk/src/reference_docs/defensive_programming.rs delete mode 100644 docs/sdk/src/reference_docs/safe_defensive_programming.rs diff --git a/Cargo.lock b/Cargo.lock index b055c79cbbf..9e48887e17a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13376,7 +13376,9 @@ dependencies = [ "pallet-assets", "pallet-aura", "pallet-authorship", + "pallet-babe", "pallet-balances", + "pallet-broker", "pallet-collective", "pallet-default-config-example", "pallet-democracy", @@ -13385,6 +13387,7 @@ dependencies = [ "pallet-examples", "pallet-multisig", "pallet-proxy", + "pallet-referenda", "pallet-scheduler", "pallet-timestamp", "pallet-transaction-payment", @@ -13404,6 +13407,7 @@ dependencies = [ "scale-info", "simple-mermaid", "sp-api", + "sp-arithmetic", "sp-core", "sp-io", "sp-keyring", @@ -18395,6 +18399,7 @@ name = "sp-arithmetic" version = "23.0.0" dependencies = [ "criterion 0.4.0", + "docify 0.2.7", "integer-sqrt", "num-traits", "parity-scale-codec", @@ -18403,6 +18408,7 @@ dependencies = [ "scale-info", "serde", "sp-crypto-hashing", + "sp-std 14.0.0", "static_assertions", ] diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml index 3f40a950c28..3f84d45640f 100644 --- a/docs/sdk/Cargo.toml +++ b/docs/sdk/Cargo.toml @@ -81,6 +81,13 @@ sp-api = { path = "../../substrate/primitives/api" } sp-core = { path = "../../substrate/primitives/core" } sp-keyring = { path = "../../substrate/primitives/keyring" } sp-runtime = { path = "../../substrate/primitives/runtime" } +sp-arithmetic = { path = "../../substrate/primitives/arithmetic" } + +# Misc pallet dependencies +pallet-referenda = { path = "../../substrate/frame/referenda" } +pallet-broker = { path = "../../substrate/frame/broker" } +pallet-babe = { path = "../../substrate/frame/babe" } + sp-offchain = { path = "../../substrate/primitives/offchain" } sp-version = { path = "../../substrate/primitives/version" } diff --git a/docs/sdk/src/guides/your_first_pallet/mod.rs b/docs/sdk/src/guides/your_first_pallet/mod.rs index c633c0a69ed..c6e0dd0edf8 100644 --- 
a/docs/sdk/src/guides/your_first_pallet/mod.rs +++ b/docs/sdk/src/guides/your_first_pallet/mod.rs @@ -105,8 +105,8 @@ //! This macro will call `.into()` under the hood. #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", transfer_better)] //! -//! Moreover, you will learn in the [Safe Defensive Programming -//! section](crate::reference_docs::safe_defensive_programming) that it is always recommended to use +//! Moreover, you will learn in the [Defensive Programming +//! section](crate::reference_docs::defensive_programming) that it is always recommended to use //! safe arithmetic operations in your runtime. By using [`frame::traits::CheckedSub`], we can not //! only take a step in that direction, but also improve the error handing and make it slightly more //! ergonomic. @@ -294,7 +294,7 @@ //! The following topics where used in this guide, but not covered in depth. It is suggested to //! study them subsequently: //! -//! - [`crate::reference_docs::safe_defensive_programming`]. +//! - [`crate::reference_docs::defensive_programming`]. //! - [`crate::reference_docs::frame_origin`]. //! - [`crate::reference_docs::frame_runtime_types`]. //! - The pallet we wrote in this guide was using `dev_mode`, learn more in diff --git a/docs/sdk/src/reference_docs/defensive_programming.rs b/docs/sdk/src/reference_docs/defensive_programming.rs new file mode 100644 index 00000000000..9828e1b5091 --- /dev/null +++ b/docs/sdk/src/reference_docs/defensive_programming.rs @@ -0,0 +1,395 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! [Defensive programming](https://en.wikipedia.org/wiki/Defensive_programming) is a design paradigm that enables a program to continue +//! running despite unexpected behavior, input, or events that may arise in runtime. +//! Usually, unforeseen circumstances may cause the program to stop or, in the Rust context, +//! panic!. Defensive practices allow for these circumstances to be accounted for ahead of time +//! and for them to be handled gracefully, which is in line with the intended fault-tolerant and +//! deterministic nature of blockchains. +//! +//! The Polkadot SDK is built to reflect these principles and to facilitate their usage accordingly. +//! +//! ## General Overview +//! +//! When developing within the context of the Substrate runtime, there is one golden rule: +//! +//! ***DO NOT PANIC***. There are some exceptions, but generally, this is the default precedent. +//! +//! > It’s important to differentiate between the runtime and node. The runtime refers to the core +//! > business logic of a Substrate-based chain, whereas the node refers to the outer client, which +//! > deals with telemetry and gossip from other nodes. For more information, read about +//! > [Substrate's node +//! > architecture](crate::reference_docs::wasm_meta_protocol#node-vs-runtime). It’s also important +//! > to note that the criticality of the node is slightly lesser +//! 
> than that of the runtime, which is why you may see `unwrap()` or other “non-defensive”
+//! > approaches in a few places of the node's code repository.
+//!
+//! Most of these practices fall within Rust's
+//! colloquial usage of proper error propagation, handling, and arithmetic-based edge cases.
+//!
+//! General guidelines:
+//!
+//! - **Avoid writing functions that could explicitly panic,** such as directly using `unwrap()` on
+//!   a [`Result`], or accessing an out-of-bounds index on a collection. Safer methods of accessing
+//!   collection types, e.g., `get()`, which return an [`Option`] that can be handled defensively,
+//!   are recommended.
+//! - **It may be acceptable to use `expect()`,** but only if one is completely certain (and has
+//!   performed a check beforehand) that a value won't panic upon unwrapping. *Even this is
+//!   discouraged*, however, as future changes to that function could then cause that statement to
+//!   panic. It is important to ensure all possible errors are propagated and handled effectively.
+//! - **If a function *can* panic,** it is usually prefaced with `unchecked_` to indicate its
+//!   unsafety.
+//! - **If you are writing a function that could panic,** [document it!](https://doc.rust-lang.org/rustdoc/how-to-write-documentation.html#documenting-components)
+//! - **Carefully handle mathematical operations.** Many seemingly simple operations, such as
+//!   **arithmetic** in the runtime, could present a number of issues [(see more later in this
+//!   document)](#integer-overflow). Use checked arithmetic wherever possible.
+//!
+//! These guidelines could be summarized in the following example, where `bad_pop` is prone to
+//! panicking, and `good_pop` allows for proper error handling to take place:
+//!
+//! ```ignore
+//! // Bad pop always requires that we return something, even if the vector/array is empty.
+//! fn bad_pop<T>(v: Vec<T>) -> T {}
+//! // Good pop allows us to return None from the Option if need be.
+//! fn good_pop<T>(v: Vec<T>) -> Option<T> {}
+//! ```
+//!
+//! ### Defensive Traits
+//!
+//! The [`Defensive`](frame::traits::Defensive) trait provides a number of functions, all of which
+//! provide an alternative to 'vanilla' Rust functions, e.g.:
+//!
+//! - [`defensive_unwrap_or()`](frame::traits::Defensive::defensive_unwrap_or) instead of
+//!   `unwrap_or()`
+//! - [`defensive_ok_or()`](frame::traits::DefensiveOption::defensive_ok_or) instead of `ok_or()`
+//!
+//! Defensive methods use [`debug_assertions`](https://doc.rust-lang.org/reference/conditional-compilation.html#debug_assertions), which panic in development, but in
+//! production/release, they will merely log an error (i.e., `log::error`).
+//!
+//! The [`Defensive`](frame::traits::Defensive) trait and its various implementations can be found
+//! [here](frame::traits::Defensive).
+//!
+//! ## Integer Overflow
+//!
+//! The Rust compiler prevents static overflow from happening at compile time.
+//! In **debug** mode, the compiled program panics in the event of an integer overflow. In
+//! **release** mode, it resorts to silently _wrapping_ the overflowed amount in a modular fashion
+//! (from the `MAX` back to zero).
+//!
+//! In runtime development, we don't always have control over what is being supplied
+//! as a parameter. For example, even this simple add function could present one of two outcomes
+//! depending on whether it is in **release** or **debug** mode:
+//!
+//! ```ignore
+//! fn naive_add(x: u8, y: u8) -> u8 {
+//!     x + y
+//! }
+//! ```
+//!
If values that could overflow were passed at runtime, this could panic (or wrap, if in release).
+//!
+//! ```ignore
+//! naive_add(250u8, 10u8); // In debug mode, this would panic. In release, this would return 4.
+//! ```
+//!
+//! It is the silent portion of this behavior that presents a real issue. Such behavior should be
+//! made obvious, especially in blockchain development, where unsafe arithmetic could produce
+//! unexpected consequences like a user balance overflowing or underflowing.
+//!
+//! Fortunately, there are ways to both represent and handle these scenarios, depending on our
+//! specific use case, built natively into Rust as well as into libraries like [`sp_arithmetic`].
+//!
+//! ## Infallible Arithmetic
+//!
+//! Both Rust and Substrate provide safe ways to deal with numbers and alternatives to floating
+//! point arithmetic.
+//!
+//! Known fallible scenarios should be mitigated: e.g., the possibility of dividing or taking the
+//! modulo by zero at any point. In most cases, one should opt for a `checked_*` method to
+//! introduce safe arithmetic into their code.
+//!
+//! A developer should use fixed-point instead of floating-point arithmetic to mitigate the
+//! potential for inaccuracy, rounding errors, or other unexpected behavior.
+//!
+//! - [Fixed point types](sp_arithmetic::fixed_point) and their associated usage can be found here.
+//! - [PerThing](sp_arithmetic::per_things) and its associated types can be found here.
+//!
+//! Using floating point number types (i.e., `f32`, `f64`) in the runtime should be avoided, as a
+//! single non-deterministic result could cause chaos for blockchain consensus, along with the
+//! issues above. For more on the peculiarities of floating point calculations, [watch this video
+//! by Computerphile](https://www.youtube.com/watch?v=PZRI1IfStY0).
+//!
+//! The following methods demonstrate different ways to handle numbers natively in Rust safely,
+//! without fear of panic or unexpected behavior from wrapping.
+//!
+//! ### Checked Arithmetic
+//!
+//! **Checked operations** utilize an `Option` as a return type. This allows for
+//! catching any unexpected behavior in the event of an overflow through simple pattern matching.
+//!
+//! This is an example of a valid operation:
+#![doc = docify::embed!("./src/reference_docs/defensive_programming.rs", checked_add_example)]
+//!
+//! This is an example of an invalid operation. In this case, a simulated integer overflow, which
+//! would simply result in `None`:
+#![doc = docify::embed!(
+	"./src/reference_docs/defensive_programming.rs",
+	checked_add_handle_error_example
+)]
+//!
+//! Suppose you aren’t sure which operation to use for runtime math. In that case, checked
+//! operations are the safest bet, presenting two predictable (and erroring) outcomes that can be
+//! handled accordingly (`Some` and `None`).
+//!
+//! The following conventions can be seen within the Polkadot SDK, where the result is
+//! handled in one of two ways:
+//!
+//! - As an [`Option`], using `if let` / `if` or `match`
+//! - As a [`Result`], via `ok_or` (or similar conversion to [`Result`] from [`Option`])
+//!
+//! #### Handling via Option - More Verbose
+//!
+//! Because checked operations return `Option`, you can use a more verbose/explicit form of error
+//! handling via `if` or `if let`:
+#![doc = docify::embed!("./src/reference_docs/defensive_programming.rs", increase_balance)]
+//!
+//! 
Alternatively, `match` may also be used directly in a more concise manner:
+#![doc = docify::embed!("./src/reference_docs/defensive_programming.rs", increase_balance_match)]
+//!
+//! This is generally a useful convention for handling checked types and most types that return
+//! `Option`.
+//!
+//! #### Handling via Result - Less Verbose
+//!
+//! In the Polkadot SDK codebase, checked operations are handled as a `Result` via `ok_or`. This is
+//! a less verbose way of expressing the above. This usage often boils down to the developer’s
+//! preference:
+#![doc = docify::embed!("./src/reference_docs/defensive_programming.rs", increase_balance_result)]
+//!
+//! ### Saturating Operations
+//!
+//! Saturating a number limits it to the type’s upper or lower bound, even if the operation would
+//! overflow at runtime. For example, adding to `u32::MAX` would simply limit itself to
+//! `u32::MAX`:
+#![doc = docify::embed!("./src/reference_docs/defensive_programming.rs", saturated_add_example)]
+//!
+//! Saturating calculations can be used if one is very sure that something won't overflow, but wants
+//! to avoid introducing any potential panic or wrapping behavior.
+//!
+//! There is also a series of defensive alternatives via
+//! [`DefensiveSaturating`](frame::traits::DefensiveSaturating), which introduces the same behavior
+//! as the [`Defensive`](frame::traits::Defensive) trait, only with saturating mathematical
+//! operations:
+#![doc = docify::embed!(
+	"./src/reference_docs/defensive_programming.rs",
+	saturated_defensive_example
+)]
+//!
+//! ### Mathematical Operations in Substrate Development - Further Context
+//!
+//! As a recap, we covered the following concepts:
+//!
+//! 1. **Checked** operations - using [`Option`] or [`Result`]
+//! 2. **Saturating** operations - limited to the lower and upper bounds of a number type
+//! 3. **Wrapped** operations (the default) - wrap around to above or below the bounds of a type
+//!
+//! #### The problem with 'default' wrapped operations
+//!
+//! **Wrapped operations** cause the overflow to wrap around to either the maximum or minimum of
+//! that type. Imagine this in the context of a blockchain, where there are account balances, voting
+//! counters, nonces for transactions, and other aspects of a blockchain.
+//!
+//! While it may seem trivial, choosing how to handle numbers is quite important. As a thought
+//! exercise, here are some scenarios that will shed more light on when to use which.
+//!
+//! #### Bob's Overflowed Balance
+//!
+//! **Bob's** balance exceeds the maximum of the `Balance` type on the `EduChain`. Because the
+//! pallet developer did not handle the calculation to add to Bob's balance with any regard to this
+//! overflow, **Bob's** balance is now essentially `0`: the operation **wrapped**.
+//!
+//!
+//! **Solution: Saturating or Checked**
+//! For Bob's balance problem, using `saturating_add` or `checked_add` could've mitigated this
+//! issue. The operation simply would've stopped at the upper bound of the particular type used
+//! for an on-chain balance. In other words: Bob's balance would've stayed at the maximum of the
+//! `Balance` type.
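+//!
+//! As a minimal sketch of the difference (using plain `u32` arithmetic purely for illustration,
+//! not any chain's actual `Balance` type):
+//!
+//! ```ignore
+//! let bobs_balance = u32::MAX - 5;
+//! // Default (wrapped) arithmetic in release mode: minting 10 wraps around to 4.
+//! assert_eq!(bobs_balance.wrapping_add(10), 4);
+//! // Saturating arithmetic: the balance caps at the type's upper bound instead.
+//! assert_eq!(bobs_balance.saturating_add(10), u32::MAX);
+//! // Checked arithmetic: the overflow is made explicit and can be turned into an error.
+//! assert_eq!(bobs_balance.checked_add(10), None);
+//! ```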
+//!
+//! #### Alice's 'Underflowed' Balance
+//!
+//! Alice’s balance has reached `0` after a transfer to Bob. Suddenly, she has been slashed on
+//! EduChain, causing her balance to underflow and wrap around to near `u32::MAX` - a very large
+//! amount - as wrapped operations can go both ways. Alice can now successfully vote using her
+//! new, overpowered token balance, destroying the chain's integrity.
+//!
+//!
+//! **Solution: Saturating**
+//! For Alice's balance problem, using `saturating_sub` could've mitigated this issue. A saturating
+//! calculation would've simply limited her balance to the lower bound of `u32`, as having a
+//! negative balance is not a concept within blockchains. In other words: Alice's balance would've
+//! stayed at `0`, even after being slashed.
+//!
+//! This also illustrates that while one system may work in isolation, interfaces such as the
+//! notion of balances are often shared across multiple pallets - meaning these small choices can
+//! make a big difference depending on the scenario.
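+//!
+//! Again, a small sketch with plain `u32` values (the amounts are purely illustrative):
+//!
+//! ```ignore
+//! let alices_balance = 0u32;
+//! let slash_amount = 100u32;
+//! // Wrapped subtraction would underflow to a huge value near u32::MAX.
+//! assert_eq!(alices_balance.wrapping_sub(slash_amount), u32::MAX - 99);
+//! // Saturating subtraction stops at the lower bound instead.
+//! assert_eq!(alices_balance.saturating_sub(slash_amount), 0);
+//! ```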
+//!
+//! #### Proposal ID Overwrite
+//!
+//! A `u8` value, `proposals_count`, is used to count the number of proposals on-chain. Every time
+//! a new proposal is added to the system, this number increases. With the proposal pallet's high
+//! usage, it has reached `u8::MAX`’s limit of 255, causing `proposals_count` to wrap back to 0.
+//! Unfortunately, this results in new proposals overwriting old ones, effectively erasing any
+//! notion of past proposals!
+//!
+//!
+//! **Solution: Checked**
+//! For the proposal IDs, proper handling via `checked` math would've been suitable. Saturating
+//! could've been used, but it also would've 'failed' silently. Using `checked_add` to ensure that
+//! the next proposal ID is valid would've been a viable way to let the user know the state of
+//! their proposal:
+//!
+//! ```ignore
+//! let next_proposal_id = current_count.checked_add(1).ok_or_else(|| Error::TooManyProposals)?;
+//! ```
+//!
+//!
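+//! A slightly fuller sketch of the same pattern (the `add_proposal` helper and the error variant
+//! here are hypothetical, purely for illustration):
+//!
+//! ```ignore
+//! fn add_proposal(proposals_count: &mut u8) -> Result<u8, Error> {
+//!     // Fail loudly instead of silently wrapping back to 0 and overwriting old proposals.
+//!     let next_proposal_id = proposals_count.checked_add(1).ok_or(Error::TooManyProposals)?;
+//!     *proposals_count = next_proposal_id;
+//!     Ok(next_proposal_id)
+//! }
+//! ```
+//!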
+//!
+//! From the above, we can clearly see the problematic nature of seemingly simple operations in the
+//! runtime, and care should be given to ensure a defensive approach is taken.
+//!
+//! ### Edge cases of `panic!`-able instances in Substrate
+//!
+//! As you traverse through the codebase (particularly in `substrate/frame`, where the majority of
+//! runtime code lives), you may notice that there are (only a few!) occurrences where `panic!` is
+//! used explicitly. This is used when the runtime should stall, rather than keep running, as that
+//! is considered safer. Particularly when it comes to mission-critical components, such as block
+//! authoring, consensus, or other protocol-level dependencies, going through with an action may
+//! actually cause harm to the network, and thus stalling would be the better option.
+//!
+//! Take the example of the BABE pallet ([`pallet_babe`]), which doesn't allow for a validator to
+//! participate if it is disabled (see: [`frame::traits::DisabledValidators`]):
+//!
+//! ```ignore
+//! if T::DisabledValidators::is_disabled(authority_index) {
+//!     panic!(
+//!         "Validator with index {:?} is disabled and should not be attempting to author blocks.",
+//!         authority_index,
+//!     );
+//! }
+//! ```
+//!
+//! There are other examples in various pallets, mostly those crucial to the blockchain’s
+//! functionality. Most of the time, you will not be writing pallets which operate at this level,
+//! but these exceptions should be noted regardless.
+//!
+//! ## Other Resources
+//!
+//! - [PBA Book - FRAME Tips & Tricks](https://polkadot-blockchain-academy.github.io/pba-book/substrate/tips-tricks/page.html?highlight=perthing#substrate-and-frame-tips-and-tricks)
+#![allow(dead_code)]
+#[allow(unused_variables)]
+mod fake_runtime_types {
+	// Note: The following types are purely for the purpose of example, and do not contain any
+	// *real* use case other than demonstrating various concepts.
+	pub enum RuntimeError {
+		Overflow,
+		UserDoesntExist,
+	}
+
+	pub type Address = ();
+
+	pub struct Runtime;
+
+	impl Runtime {
+		fn get_balance(account: Address) -> Result<u64, RuntimeError> {
+			Ok(0u64)
+		}
+
+		fn set_balance(account: Address, new_balance: u64) {}
+	}
+
+	#[docify::export]
+	fn increase_balance(account: Address, amount: u64) -> Result<(), RuntimeError> {
+		// Get a user's current balance
+		let balance = Runtime::get_balance(account)?;
+		// SAFELY increase the balance by some amount
+		if let Some(new_balance) = balance.checked_add(amount) {
+			Runtime::set_balance(account, new_balance);
+			Ok(())
+		} else {
+			Err(RuntimeError::Overflow)
+		}
+	}
+
+	#[docify::export]
+	fn increase_balance_match(account: Address, amount: u64) -> Result<(), RuntimeError> {
+		// Get a user's current balance
+		let balance = Runtime::get_balance(account)?;
+		// SAFELY increase the balance by some amount
+		let new_balance = match balance.checked_add(amount) {
+			Some(balance) => balance,
+			None => {
+				return Err(RuntimeError::Overflow);
+			},
+		};
+		Runtime::set_balance(account, new_balance);
+		Ok(())
+	}
+
+	#[docify::export]
+	fn increase_balance_result(account: Address, amount: u64) -> Result<(), RuntimeError> {
+		// Get a user's current balance
+		let balance = Runtime::get_balance(account)?;
+		// SAFELY increase the balance by some amount - this time, by using `ok_or`
+		let new_balance = balance.checked_add(amount).ok_or(RuntimeError::Overflow)?;
+		Runtime::set_balance(account, new_balance);
+		Ok(())
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use frame::traits::DefensiveSaturating;
+	#[docify::export]
+	#[test]
+	fn checked_add_example() {
+		// This is valid, as 20 is perfectly within the bounds of u32.
+		let add = (10u32).checked_add(10);
+		assert_eq!(add, Some(20))
+	}
+
+	#[docify::export]
+	#[test]
+	fn checked_add_handle_error_example() {
+		// This is invalid - we are adding 10 to u32::MAX, which would overflow.
+		// Luckily, checked_add just marks this as None!
+		let add = u32::MAX.checked_add(10);
+		assert_eq!(add, None)
+	}
+
+	#[docify::export]
+	#[test]
+	fn saturated_add_example() {
+		// Saturating add simply saturates
+		// to the numeric bound of that type if it overflows.
+		let add = u32::MAX.saturating_add(10);
+		assert_eq!(add, u32::MAX)
+	}
+
+	#[docify::export]
+	#[test]
+	#[cfg_attr(debug_assertions, should_panic(expected = "Defensive failure has been triggered!"))]
+	fn saturated_defensive_example() {
+		let saturated_defensive = u32::MAX.defensive_saturating_add(10);
+		assert_eq!(saturated_defensive, u32::MAX);
+	}
+}
diff --git a/docs/sdk/src/reference_docs/mod.rs b/docs/sdk/src/reference_docs/mod.rs
index de0b012bb12..a0d8d05b449 100644
--- a/docs/sdk/src/reference_docs/mod.rs
+++ b/docs/sdk/src/reference_docs/mod.rs
@@ -47,8 +47,7 @@ pub mod signed_extensions;
 pub mod frame_origin;
 
 /// Learn about how to write safe and defensive code in your FRAME runtime.
-// TODO: @CrackTheCode016 https://github.com/paritytech/polkadot-sdk-docs/issues/44
-pub mod safe_defensive_programming;
+pub mod defensive_programming;
 
 /// Learn about composite enums and other runtime level types, such as "RuntimeEvent" and
 /// "RuntimeCall".
diff --git a/docs/sdk/src/reference_docs/safe_defensive_programming.rs b/docs/sdk/src/reference_docs/safe_defensive_programming.rs
deleted file mode 100644
index 9d0f028e570..00000000000
--- a/docs/sdk/src/reference_docs/safe_defensive_programming.rs
+++ /dev/null
@@ -1 +0,0 @@
-//! 
diff --git a/substrate/primitives/arithmetic/Cargo.toml b/substrate/primitives/arithmetic/Cargo.toml
index 29c406b10b7..64c1025b585 100644
--- a/substrate/primitives/arithmetic/Cargo.toml
+++ b/substrate/primitives/arithmetic/Cargo.toml
@@ -26,6 +26,8 @@ num-traits = { version = "0.2.17", default-features = false }
 scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
 serde = { features = ["alloc", "derive"], optional = true, workspace = true }
 static_assertions = "1.1.0"
+sp-std = { path = "../std", default-features = false }
+docify = "0.2.7"
 
 [dev-dependencies]
 criterion = "0.4.0"
@@ -41,6 +43,7 @@ std = [
 	"scale-info/std",
 	"serde/std",
 	"sp-crypto-hashing/std",
+	"sp-std/std",
 ]
 # Serde support without relying on std features.
 serde = ["dep:serde", "scale-info/serde"]
diff --git a/substrate/primitives/arithmetic/src/fixed_point.rs b/substrate/primitives/arithmetic/src/fixed_point.rs
index 46c09df2186..736a900bde2 100644
--- a/substrate/primitives/arithmetic/src/fixed_point.rs
+++ b/substrate/primitives/arithmetic/src/fixed_point.rs
@@ -16,6 +16,33 @@
 // limitations under the License.
 
 //! Decimal Fixed Point implementations for Substrate runtime.
+//! Similar to types that implement [`PerThing`](crate::per_things), these are also
+//! fixed-point types; however, they are able to represent larger fractions:
+#![doc = docify::embed!("./src/lib.rs", fixed_u64)]
+//!
+//! ### Fixed Point Types in Practice
+//!
+//! If one needs to exceed the value of one (1), then
+//! [`FixedU64`](FixedU64) (and its signed and `u128` counterparts) can be utilized.
+//! Take for example this very rudimentary pricing mechanism, where we wish to calculate the demand
+//! / supply to get a price for some on-chain compute:
+#![doc = docify::embed!(
+	"./src/lib.rs",
+	fixed_u64_block_computation_example
+)]
+//!
+//! For a much more comprehensive example, be sure to look at the source of the broker (the
+//! "coretime") pallet.
+//!
+//! #### Arithmetic with Fixed Point Types
+//!
+//! Just as with [`PerThing`](PerThing), you can also perform regular mathematical
+//! operations:
+#![doc = docify::embed!(
+	"./src/lib.rs",
+	fixed_u64_operation_example
+)]
+//!
 
 use crate::{
 	helpers_128bit::{multiply_by_rational_with_rounding, sqrt},
diff --git a/substrate/primitives/arithmetic/src/lib.rs b/substrate/primitives/arithmetic/src/lib.rs
index 33992e15423..01c403a7c4a 100644
--- a/substrate/primitives/arithmetic/src/lib.rs
+++ b/substrate/primitives/arithmetic/src/lib.rs
@@ -101,7 +101,7 @@ where
 	fn tcmp(&self, other: &T, threshold: T) -> Ordering {
 		// early exit.
 		if threshold.is_zero() {
-			return self.cmp(other)
+			return self.cmp(other);
 		}
 
 		let upper_bound = other.saturating_add(threshold);
@@ -206,12 +206,12 @@ where
 
 		// Nothing to do here.
 		if count.is_zero() {
-			return Ok(Vec::<T>::new())
+			return Ok(Vec::<T>::new());
 		}
 
 		let diff = targeted_sum.max(sum) - targeted_sum.min(sum);
 		if diff.is_zero() {
-			return Ok(input.to_vec())
+			return Ok(input.to_vec());
 		}
 
 		let needs_bump = targeted_sum > sum;
@@ -254,7 +254,7 @@ where
 					min_index += 1;
 					min_index %= count;
 				}
-				leftover -= One::one()
+				leftover -= One::one();
 			}
 		} else {
 			// must decrease the stakes a bit. decrement from the max element. 
index of maximum is now
@@ -288,7 +288,7 @@ where
 				if output_with_idx[max_index].1 <= threshold {
 					max_index = max_index.checked_sub(1).unwrap_or(count - 1);
 				}
-				leftover -= One::one()
+				leftover -= One::one();
 			} else {
 				max_index = max_index.checked_sub(1).unwrap_or(count - 1);
 			}
@@ -300,7 +300,7 @@ where
 			targeted_sum,
 			"sum({:?}) != {:?}",
 			output_with_idx,
-			targeted_sum,
+			targeted_sum
 		);
 
 		// sort again based on the original index.
@@ -356,7 +356,7 @@ mod normalize_tests {
 			vec![
 				Perbill::from_parts(333333334),
 				Perbill::from_parts(333333333),
-				Perbill::from_parts(333333333),
+				Perbill::from_parts(333333333)
 			]
 		);
 
@@ -367,7 +367,7 @@ mod normalize_tests {
 			vec![
 				Perbill::from_parts(316666668),
 				Perbill::from_parts(383333332),
-				Perbill::from_parts(300000000),
+				Perbill::from_parts(300000000)
 			]
 		);
 	}
@@ -378,13 +378,13 @@ mod normalize_tests {
 		// could have a situation where the sum cannot be calculated in the inner type. Calculating
 		// using the upper type of the per_thing should assure this to be okay.
 		assert_eq!(
-			vec![PerU16::from_percent(40), PerU16::from_percent(40), PerU16::from_percent(40),]
+			vec![PerU16::from_percent(40), PerU16::from_percent(40), PerU16::from_percent(40)]
 				.normalize(PerU16::one())
 				.unwrap(),
 			vec![
 				PerU16::from_parts(21845), // 33%
 				PerU16::from_parts(21845), // 33%
-				PerU16::from_parts(21845), // 33%
+				PerU16::from_parts(21845)  // 33%
 			]
 		);
 	}
@@ -428,6 +428,88 @@ mod normalize_tests {
 	}
 }
 
+#[cfg(test)]
+mod per_and_fixed_examples {
+	use super::*;
+
+	#[docify::export]
+	#[test]
+	fn percent_mult() {
+		let percent = Percent::from_rational(5u32, 100u32); // aka, 5%
+		let five_percent_of_100 = percent * 100u32; // 5% of 100 is 5.
+		assert_eq!(five_percent_of_100, 5)
+	}
+	#[docify::export]
+	#[test]
+	fn perbill_example() {
+		let p = Perbill::from_percent(80);
+		// 800000000 bil, or a representative of 0.800000000.
+		// Precision is in the billions place.
+		assert_eq!(p.deconstruct(), 800000000);
+	}
+
+	#[docify::export]
+	#[test]
+	fn percent_example() {
+		let percent = Percent::from_rational(190u32, 400u32);
+		assert_eq!(percent.deconstruct(), 47);
+	}
+
+	#[docify::export]
+	#[test]
+	fn fixed_u64_block_computation_example() {
+		// Calculate a very rudimentary on-chain price from supply / demand
+		// Supply: Cores available per block
+		// Demand: Cores being ordered per block
+		let price = FixedU64::from_rational(5u128, 10u128);
+
+		// 0.5 DOT per core
+		assert_eq!(price, FixedU64::from_float(0.5));
+
+		// Now, the story has changed - lots of demand means we buy as many cores as there
+		// are available. This also means that the price goes up! For the sake of simplicity, we
+		// don't care about who gets a core - just about our very simple price model.
+
+		// Calculate a very rudimentary on-chain price from supply / demand
+		// Supply: Cores available per block
+		// Demand: Cores being ordered per block
+		let price = FixedU64::from_rational(19u128, 10u128);
+
+		// 1.9 DOT per core
+		assert_eq!(price, FixedU64::from_float(1.9));
+	}
+
+	#[docify::export]
+	#[test]
+	fn fixed_u64() {
+		// The difference between this and per-things is that per-things operate within the realm
+		// of [0, 1]. In cases where we need > 1, we can use fixed types such as FixedU64.
+
+		let rational_1 = FixedU64::from_rational(10, 5); // "200%" aka 2.
+		let rational_2 = FixedU64::from_rational_with_rounding(5, 10, Rounding::Down); // "50%" aka 0.50...
+
+		assert_eq!(rational_1, (2u64).into());
+		assert_eq!(rational_2.into_perbill(), Perbill::from_float(0.5));
+	}
+
+	#[docify::export]
+	#[test]
+	fn fixed_u64_operation_example() {
+		let rational_1 = FixedU64::from_rational(10, 5); // "200%" aka 2.
+		let rational_2 = FixedU64::from_rational(8, 5); // "160%" aka 1.6.
+
+		let addition = rational_1 + rational_2;
+		let multiplication = rational_1 * rational_2;
+		let division = rational_1 / rational_2;
+		let subtraction = rational_1 - rational_2;
+
+		assert_eq!(addition, FixedU64::from_float(3.6));
+		assert_eq!(multiplication, FixedU64::from_float(3.2));
+		assert_eq!(division, FixedU64::from_float(1.25));
+		assert_eq!(subtraction, FixedU64::from_float(0.4));
+	}
+}
+
 #[cfg(test)]
 mod threshold_compare_tests {
 	use super::*;
@@ -440,15 +522,15 @@
 		let e = Perbill::from_percent(10).mul_ceil(b);
 
 		// [115 - 11,5 (103,5), 115 + 11,5 (126,5)] is all equal
-		assert_eq!(103u32.tcmp(&b, e), Ordering::Equal);
-		assert_eq!(104u32.tcmp(&b, e), Ordering::Equal);
-		assert_eq!(115u32.tcmp(&b, e), Ordering::Equal);
-		assert_eq!(120u32.tcmp(&b, e), Ordering::Equal);
-		assert_eq!(126u32.tcmp(&b, e), Ordering::Equal);
-		assert_eq!(127u32.tcmp(&b, e), Ordering::Equal);
-
-		assert_eq!(128u32.tcmp(&b, e), Ordering::Greater);
-		assert_eq!(102u32.tcmp(&b, e), Ordering::Less);
+		assert_eq!((103u32).tcmp(&b, e), Ordering::Equal);
+		assert_eq!((104u32).tcmp(&b, e), Ordering::Equal);
+		assert_eq!((115u32).tcmp(&b, e), Ordering::Equal);
+		assert_eq!((120u32).tcmp(&b, e), Ordering::Equal);
+		assert_eq!((126u32).tcmp(&b, e), Ordering::Equal);
+		assert_eq!((127u32).tcmp(&b, e), Ordering::Equal);
+
+		assert_eq!((128u32).tcmp(&b, e), Ordering::Greater);
+		assert_eq!((102u32).tcmp(&b, e), Ordering::Less);
 	}
 
 	#[test]
@@ -458,15 +540,15 @@
 		let e = Perbill::from_parts(100) * b;
 
 		// [115 - 11,5 (103,5), 115 + 11,5 (126,5)] is all equal
-		assert_eq!(103u32.tcmp(&b, e), 103u32.cmp(&b));
-		assert_eq!(104u32.tcmp(&b, e), 104u32.cmp(&b));
-		assert_eq!(115u32.tcmp(&b, e), 115u32.cmp(&b));
-		assert_eq!(120u32.tcmp(&b, e), 120u32.cmp(&b));
-		assert_eq!(126u32.tcmp(&b, e), 126u32.cmp(&b));
-		assert_eq!(127u32.tcmp(&b, e), 127u32.cmp(&b));
-
-		assert_eq!(128u32.tcmp(&b, e), 128u32.cmp(&b));
-		assert_eq!(102u32.tcmp(&b, e), 102u32.cmp(&b));
+		assert_eq!((103u32).tcmp(&b, e), (103u32).cmp(&b));
+		assert_eq!((104u32).tcmp(&b, e), (104u32).cmp(&b));
+		assert_eq!((115u32).tcmp(&b, e), (115u32).cmp(&b));
+		assert_eq!((120u32).tcmp(&b, e), (120u32).cmp(&b));
+		assert_eq!((126u32).tcmp(&b, e), (126u32).cmp(&b));
+		assert_eq!((127u32).tcmp(&b, e), (127u32).cmp(&b));
+
+		assert_eq!((128u32).tcmp(&b, e), (128u32).cmp(&b));
+		assert_eq!((102u32).tcmp(&b, e), (102u32).cmp(&b));
 	}
 
 	#[test]
diff --git a/substrate/primitives/arithmetic/src/per_things.rs b/substrate/primitives/arithmetic/src/per_things.rs
index 057bfd7bf88..f73dbe30cec 100644
--- a/substrate/primitives/arithmetic/src/per_things.rs
+++ b/substrate/primitives/arithmetic/src/per_things.rs
@@ -15,6 +15,42 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+//! Types that implement [`PerThing`](PerThing) can be used as an alternative to floating-point
+//! types for numbers that operate within the realm of `[0, 1]`. The primary types you may
+//! encounter in Substrate are the following:
+//! - [`Percent`](Percent) - parts of one hundred.
+//! - [`Permill`](Permill) - parts of a million.
+//! - [`Perbill`](Perbill) - parts of a billion.
+//!
+//! You may see them used as follows:
+//!
+//! > **[`Perbill`](Perbill), parts of a billion**
+#![doc = docify::embed!("./src/lib.rs", perbill_example)]
+//! > **[`Percent`](Percent), parts of a hundred**
+#![doc = docify::embed!("./src/lib.rs", percent_example)]
+//!
+//! Note that `Percent` is represented as a _rounded down_ fixed-point
+//! number (see the example above). Unlike primitive types, types that implement
+//! [`PerThing`](PerThing) will also not overflow, and are therefore safe to use.
+//! They adopt the same behavior that a saturated calculation would provide, meaning that if one is
+//! to go over "100%", it wouldn't overflow, but simply stop at the upper or lower bound.
+//!
+//! For use cases which require precision beyond the range of `[0, 1]`, there are fixed-point types
+//! which can be used.
+//!
+//! Each of these can be used to construct and represent ratios within our runtime.
+//! You will find types like [`Perbill`](Perbill) being used often in pallet
+//! development. `pallet_referenda` is a good example of a pallet which makes good use of fixed
+//! point arithmetic, as it relies on representing various curves and thresholds relating to
+//! governance.
+//!
+//! #### Fixed Point Arithmetic with [`PerThing`](PerThing)
+//!
+//! As stated, one can also perform mathematics using these types directly. For example, finding
+//! the percentage of a particular item:
+
+#![doc = docify::embed!("./src/lib.rs", percent_mult)]
+
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
-- 
GitLab


From b74353d3e94ca685e953f5138f0676214140dcdd Mon Sep 17 00:00:00 2001
From: eskimor
Date: Wed, 20 Mar 2024 14:53:55 +0100
Subject: [PATCH 005/128] Fix algorithmic complexity of on-demand scheduler
 with regards to number of cores. (#3190)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We witnessed really poor performance on Rococo, where we ended up with 50
on-demand cores. This was due to the fact that the full queue was processed
for each core. With this change, full queue processing will happen far less
often (most of the time, complexity is O(1) or O(log(n))), and if it happens,
then only for one core (in expectation).

The spot price is now also updated before each order to ensure economic back
pressure.

TODO:

- [x] Implement
- [x] Basic tests
- [x] Add more tests (see todos)
- [x] Run benchmark to confirm better performance, first results suggest >
100x faster.
- [x] Write migrations
- [x] Bump scale-info version and remove patch in Cargo.toml
- [x] Write PR docs: on-demand performance improved, more on-demand cores are
no longer problematic. If need be, the max queue size can also be increased
again (maybe not to 10k).

Optional: Performance can be improved even more if we called
`pop_assignment_for_core()` before calling `report_processed()` (avoiding
needless affinity drops). The effect gets smaller the larger the claim queue,
and I would only go for it if it does not add complexity to the scheduler.
---------

Co-authored-by: eskimor
Co-authored-by: antonva
Co-authored-by: command-bot <>
Co-authored-by: Anton Vilhelm Ásgeirsson
Co-authored-by: ordian
---
 Cargo.lock                                    |   8 +-
 polkadot/primitives/src/lib.rs                |   3 +-
 polkadot/primitives/src/v6/mod.rs             |   7 +
 polkadot/runtime/parachains/Cargo.toml        |   2 +-
 .../src/assigner_on_demand/benchmarking.rs    |  11 +-
 .../src/assigner_on_demand/migration.rs       | 181 +++++
 .../parachains/src/assigner_on_demand/mod.rs  | 690 ++++++++++++------
 .../src/assigner_on_demand/tests.rs           | 514 +++++++------
 .../runtime/parachains/src/configuration.rs   |  10 +-
 polkadot/runtime/rococo/src/lib.rs            |   1 +
 .../runtime_parachains_assigner_on_demand.rs  |  72 +-
 .../runtime_parachains_assigner_on_demand.rs  |  76 +-
 prdoc/pr_3190.prdoc                           |  17 +
 13 files changed, 1046 insertions(+), 546 deletions(-)
 create mode 100644 polkadot/runtime/parachains/src/assigner_on_demand/migration.rs
 create mode 100644 prdoc/pr_3190.prdoc

diff --git a/Cargo.lock b/Cargo.lock
index 9e48887e17a..5401dc5ecfb 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -17005,9 +17005,9 @@ dependencies = [
 
 [[package]]
 name = "scale-info"
-version = "2.10.0"
+version = "2.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7f7d66a1128282b7ef025a8ead62a4a9fcf017382ec53b8ffbf4d7bf77bd3c60"
+checksum = "2ef2175c2907e7c8bc0a9c3f86aeb5ec1f3b275300ad58a44d0c3ae379a5e52e"
 dependencies = [
 	"bitvec",
 	"cfg-if",
@@ -17019,9 +17019,9 @@ dependencies = [
 
 [[package]]
 name = "scale-info-derive"
-version = "2.10.0"
+version = "2.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "abf2c68b89cafb3b8d918dd07b42be0da66ff202cf1155c5739a4e0c1ea0dc19"
+checksum = "634d9b8eb8fd61c5cdd3390d9b2132300a7e7618955b98b8416f118c1b4e144f"
 dependencies = [
 	"proc-macro-crate 1.3.1",
 	"proc-macro2",
diff --git a/polkadot/primitives/src/lib.rs b/polkadot/primitives/src/lib.rs
index 2ddd9b58dfe..745195ce092 100644
--- a/polkadot/primitives/src/lib.rs
+++ b/polkadot/primitives/src/lib.rs
@@ -58,7 +58,8 @@ pub use v6::{
 	ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation,
 	ValidityError, ASSIGNMENT_KEY_TYPE_ID, LEGACY_MIN_BACKING_VOTES, LOWEST_PUBLIC_ID,
 	MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, MIN_CODE_SIZE,
-	ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, PARACHAINS_INHERENT_IDENTIFIER, PARACHAIN_KEY_TYPE_ID,
+	ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, ON_DEMAND_MAX_QUEUE_MAX_SIZE, PARACHAINS_INHERENT_IDENTIFIER,
+	PARACHAIN_KEY_TYPE_ID,
 };
 
 #[cfg(feature = "std")]
diff --git a/polkadot/primitives/src/v6/mod.rs b/polkadot/primitives/src/v6/mod.rs
index 742dbed1cd8..9e7f910314c 100644
--- a/polkadot/primitives/src/v6/mod.rs
+++ b/polkadot/primitives/src/v6/mod.rs
@@ -399,6 +399,13 @@ pub const MAX_POV_SIZE: u32 = 5 * 1024 * 1024;
 /// Can be adjusted in configuration.
 pub const ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE: u32 = 10_000;
 
+/// Maximum for the maximum queue size.
+///
+/// Setting `on_demand_queue_max_size` to a value higher than this is unsound. This is more of a
+/// theoretical limit, just far enough below what the underlying type supports, so comparisons are
+/// possible even with indices that overflow the underlying type.
+pub const ON_DEMAND_MAX_QUEUE_MAX_SIZE: u32 = 1_000_000_000;
+
 /// Backing votes threshold used from the host prior to runtime API version 6 and from the runtime
 /// prior to v9 configuration migration.
pub const LEGACY_MIN_BACKING_VOTES: u32 = 2; diff --git a/polkadot/runtime/parachains/Cargo.toml b/polkadot/runtime/parachains/Cargo.toml index 61040145476..6e693b83ae1 100644 --- a/polkadot/runtime/parachains/Cargo.toml +++ b/polkadot/runtime/parachains/Cargo.toml @@ -15,7 +15,7 @@ bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } log = { workspace = true } rustc-hex = { version = "2.1.0", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.0", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], workspace = true } derive_more = "0.99.17" bitflags = "1.3.2" diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs b/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs index 8360e7a78d0..779d6f04e39 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs @@ -70,11 +70,7 @@ mod benchmarks { let para_id = ParaId::from(111u32); init_parathread::(para_id); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let order = EnqueuedOrder::new(para_id); - - for _ in 0..s { - Pallet::::add_on_demand_order(order.clone(), QueuePushDirection::Back).unwrap(); - } + Pallet::::populate_queue(para_id, s); #[extrinsic_call] _(RawOrigin::Signed(caller.into()), BalanceOf::::max_value(), para_id) @@ -87,11 +83,8 @@ mod benchmarks { let para_id = ParaId::from(111u32); init_parathread::(para_id); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let order = EnqueuedOrder::new(para_id); - for _ in 0..s { - Pallet::::add_on_demand_order(order.clone(), QueuePushDirection::Back).unwrap(); - } + Pallet::::populate_queue(para_id, s); #[extrinsic_call] _(RawOrigin::Signed(caller.into()), BalanceOf::::max_value(), para_id) diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs b/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs new file mode 100644 index 00000000000..5071653377d --- /dev/null +++ b/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs @@ -0,0 +1,181 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! A module that is responsible for migration of storage. 
+use super::*;
+use frame_support::{
+	migrations::VersionedMigration, pallet_prelude::ValueQuery, storage_alias,
+	traits::OnRuntimeUpgrade, weights::Weight,
+};
+
+mod v0 {
+	use super::*;
+	use sp_std::collections::vec_deque::VecDeque;
+
+	#[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone)]
+	pub(super) struct EnqueuedOrder {
+		pub para_id: ParaId,
+	}
+
+	/// Keeps track of the multiplier used to calculate the current spot price for the on demand
+	/// assigner.
+	/// NOTE: Ignoring the `OnEmpty` field for the migration.
+	#[storage_alias]
+	pub(super) type SpotTraffic<T: Config> = StorageValue<Pallet<T>, FixedU128, ValueQuery>;
+
+	/// The order storage entry. Uses a VecDeque to be able to push to the front of the
+	/// queue from the scheduler on session boundaries.
+	/// NOTE: Ignoring the `OnEmpty` field for the migration.
+	#[storage_alias]
+	pub(super) type OnDemandQueue<T: Config> =
+		StorageValue<Pallet<T>, VecDeque<EnqueuedOrder>, ValueQuery>;
+}
+
+mod v1 {
+	use super::*;
+
+	use crate::assigner_on_demand::LOG_TARGET;
+
+	/// Migration to V1
+	pub struct UncheckedMigrateToV1<T>(sp_std::marker::PhantomData<T>);
+	impl<T: Config> OnRuntimeUpgrade for UncheckedMigrateToV1<T> {
+		fn on_runtime_upgrade() -> Weight {
+			let mut weight: Weight = Weight::zero();
+
+			// Migrate the current traffic value
+			let config = <configuration::Pallet<T>>::config();
+			QueueStatus::<T>::mutate(|mut queue_status| {
+				Pallet::<T>::update_spot_traffic(&config, &mut queue_status);
+
+				let v0_queue = v0::OnDemandQueue::<T>::take();
+				// Re-adding the old orders will use the new system.
+				v0_queue.into_iter().for_each(|enqueued_order| {
+					Pallet::<T>::add_on_demand_order(
+						queue_status,
+						enqueued_order.para_id,
+						QueuePushDirection::Back,
+					);
+				});
+			});
+
+			// Remove the old storage.
+			v0::OnDemandQueue::<T>::kill(); // 1 write
+			v0::SpotTraffic::<T>::kill(); // 1 write
+
+			// Config read
+			weight.saturating_accrue(T::DbWeight::get().reads(1));
+			// QueueStatus read write (update_spot_traffic)
+			weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1));
+			// Kill x 2
+			weight.saturating_accrue(T::DbWeight::get().writes(2));
+
+			log::info!(target: LOG_TARGET, "Migrated on demand assigner storage to v1");
+			weight
+		}
+
+		#[cfg(feature = "try-runtime")]
+		fn pre_upgrade() -> Result<Vec<u8>, sp_runtime::TryRuntimeError> {
+			let n: u32 = v0::OnDemandQueue::<T>::get().len() as u32;
+
+			log::info!(
+				target: LOG_TARGET,
+				"Number of orders waiting in the queue before: {n}",
+			);
+
+			Ok(n.encode())
+		}
+
+		#[cfg(feature = "try-runtime")]
+		fn post_upgrade(state: Vec<u8>) -> Result<(), sp_runtime::TryRuntimeError> {
+			log::info!(target: LOG_TARGET, "Running post_upgrade()");
+
+			ensure!(
+				v0::OnDemandQueue::<T>::get().is_empty(),
+				"OnDemandQueue should be empty after the migration"
+			);
+
+			let expected_len = u32::decode(&mut &state[..]).unwrap();
+			let queue_status_size = QueueStatus::<T>::get().size();
+			ensure!(
+				expected_len == queue_status_size,
+				"Number of orders should be the same before and after migration"
+			);
+
+			let n_affinity_entries: u32 =
+				AffinityEntries::<T>::iter().map(|(_index, heap)| heap.len() as u32).sum();
+			let n_para_id_affinity: u32 = ParaIdAffinity::<T>::iter()
+				.map(|(_para_id, affinity)| affinity.count as u32)
+				.sum();
+			ensure!(
+				n_para_id_affinity == n_affinity_entries,
+				"Number of affinity entries should be the same as the counts in ParaIdAffinity"
+			);
+
+			Ok(())
+		}
+	}
+}
+
+/// Migrate `V0` to `V1` of the storage format.
+pub type MigrateV0ToV1<T> = VersionedMigration<
+	0,
+	1,
+	v1::UncheckedMigrateToV1<T>,
+	Pallet<T>,
+	<T as frame_system::Config>::DbWeight,
+>;
+
+#[cfg(test)]
+mod tests {
+	use super::{v0, v1, OnRuntimeUpgrade, Weight};
+	use crate::mock::{new_test_ext, MockGenesisConfig, OnDemandAssigner, Test};
+	use primitives::Id as ParaId;
+
+	#[test]
+	fn migration_to_v1_preserves_queue_ordering() {
+		new_test_ext(MockGenesisConfig::default()).execute_with(|| {
+			// Place orders for paraids 1..5
+			for i in 1..=5 {
+				v0::OnDemandQueue::<Test>::mutate(|queue| {
+					queue.push_back(v0::EnqueuedOrder { para_id: ParaId::new(i) })
+				});
+			}
+
+			// Queue has 5 orders
+			let old_queue = v0::OnDemandQueue::<Test>::get();
+			assert_eq!(old_queue.len(), 5);
+			// New queue has 0 orders
+			assert_eq!(OnDemandAssigner::get_queue_status().size(), 0);
+
+			// For tests, db weight is zero.
+			assert_eq!(
+				<v1::UncheckedMigrateToV1<Test> as OnRuntimeUpgrade>::on_runtime_upgrade(),
+				Weight::zero()
+			);
+
+			// New queue has 5 orders
+			assert_eq!(OnDemandAssigner::get_queue_status().size(), 5);
+
+			// Compare each entry from the old queue with the entry in the new queue.
+			old_queue.iter().zip(OnDemandAssigner::get_free_entries().iter()).for_each(
+				|(old_enq, new_enq)| {
+					assert_eq!(old_enq.para_id, new_enq.para_id);
+				},
+			);
+		});
+	}
+}
diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs
index bc450dc7812..c47c8745e65 100644
--- a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs
+++ b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs
@@ -16,22 +16,32 @@
 //! The parachain on demand assignment module.
 //!
-//! Implements a mechanism for taking in orders for pay as you go (PAYG) or on demand
-//! parachain (previously parathreads) assignments. This module is not handled by the
-//! initializer but is instead instantiated in the `construct_runtime` macro.
+//! Implements a mechanism for taking in orders for on-demand parachain (previously parathreads)
+//! assignments. This module is not handled by the initializer but is instead instantiated in the
+//! `construct_runtime` macro.
 //!
 //! The module currently limits parallel execution of blocks from the same `ParaId` via
 //! a core affinity mechanism. As long as there exists an affinity for a `CoreIndex` for
 //! a specific `ParaId`, orders for blockspace for that `ParaId` will only be assigned to
-//! that `CoreIndex`. This affinity mechanism can be removed if it can be shown that parallel
-//! execution is valid.
+//! that `CoreIndex`.
+//!
+//! NOTE: Once we have elastic scaling implemented we might want to extend this module to support
+//! ignoring core affinity up to a certain extent. This should be opt-in though as the parachain
+//! needs to support multiple cores in the same block. If we want to enable a single parachain
+//! occupying multiple cores in on-demand, we will likely add a separate order type, where the
+//! intent can be made explicit.
mod benchmarking; +pub mod migration; mod mock_helpers; +extern crate alloc; + #[cfg(test)] mod tests; +use core::mem::take; + use crate::{configuration, paras, scheduler::common::Assignment}; use frame_support::{ @@ -43,13 +53,17 @@ use frame_support::{ }, }; use frame_system::pallet_prelude::*; -use primitives::{CoreIndex, Id as ParaId}; +use primitives::{CoreIndex, Id as ParaId, ON_DEMAND_MAX_QUEUE_MAX_SIZE}; use sp_runtime::{ traits::{One, SaturatedConversion}, FixedPointNumber, FixedPointOperand, FixedU128, Perbill, Saturating, }; -use sp_std::{collections::vec_deque::VecDeque, prelude::*}; +use alloc::collections::BinaryHeap; +use sp_std::{ + cmp::{Ord, Ordering, PartialOrd}, + prelude::*, +}; const LOG_TARGET: &str = "runtime::parachains::assigner-on-demand"; @@ -73,17 +87,116 @@ impl WeightInfo for TestWeightInfo { } } +/// Meta data for full queue. +/// +/// This includes elements with affinity and free entries. +/// +/// The actual queue is implemented via multiple priority queues. One for each core, for entries +/// which currently have a core affinity and one free queue, with entries without any affinity yet. +/// +/// The design aims to have most queue accessess be O(1) or O(log(N)). Absolute worst case is O(N). +/// Importantly this includes all accessess that happen in a single block. Even with 50 cores, the +/// total complexity of all operations in the block should maintain above complexities. In +/// particular O(N) stays O(N), it should never be O(N*cores). +/// +/// More concrete rundown on complexity: +/// +/// - insert: O(1) for placing an order, O(log(N)) for push backs. +/// - pop_assignment_for_core: O(log(N)), O(N) worst case: Can only happen for one core, next core +/// is already less work. +/// - report_processed & push back: If affinity dropped to 0, then O(N) in the worst case. Again +/// this divides per core. +/// +/// Reads still exist, also improved slightly, but worst case we fetch all entries. +#[derive(Encode, Decode, TypeInfo)] +struct QueueStatusType { + /// Last calculated traffic value. + traffic: FixedU128, + /// The next index to use. + next_index: QueueIndex, + /// Smallest index still in use. + /// + /// In case of a completely empty queue (free + affinity queues), `next_index - smallest_index + /// == 0`. + smallest_index: QueueIndex, + /// Indices that have been freed already. + /// + /// But have a hole to `smallest_index`, so we can not yet bump `smallest_index`. This binary + /// heap is roughly bounded in the number of on demand cores: + /// + /// For a single core, elements will always be processed in order. With each core added, a + /// level of out of order execution is added. + freed_indices: BinaryHeap, +} + +impl Default for QueueStatusType { + fn default() -> QueueStatusType { + QueueStatusType { + traffic: FixedU128::default(), + next_index: QueueIndex(0), + smallest_index: QueueIndex(0), + freed_indices: BinaryHeap::new(), + } + } +} + +impl QueueStatusType { + /// How many orders are queued in total? + /// + /// This includes entries which have core affinity. + fn size(&self) -> u32 { + self.next_index + .0 + .overflowing_sub(self.smallest_index.0) + .0 + .saturating_sub(self.freed_indices.len() as u32) + } + + /// Get current next index + /// + /// to use for an element newly pushed to the back of the queue. 
+ fn push_back(&mut self) -> QueueIndex { + let QueueIndex(next_index) = self.next_index; + self.next_index = QueueIndex(next_index.overflowing_add(1).0); + QueueIndex(next_index) + } + + /// Push something to the front of the queue + fn push_front(&mut self) -> QueueIndex { + self.smallest_index = QueueIndex(self.smallest_index.0.overflowing_sub(1).0); + self.smallest_index + } + + /// The given index is no longer part of the queue. + /// + /// This updates `smallest_index` if need be. + fn consume_index(&mut self, removed_index: QueueIndex) { + if removed_index != self.smallest_index { + self.freed_indices.push(removed_index.reverse()); + return + } + let mut index = self.smallest_index.0.overflowing_add(1).0; + // Even more to advance? + while self.freed_indices.peek() == Some(&ReverseQueueIndex(index)) { + index = index.overflowing_add(1).0; + self.freed_indices.pop(); + } + self.smallest_index = QueueIndex(index); + } +} + /// Keeps track of how many assignments a scheduler currently has at a specific `CoreIndex` for a /// specific `ParaId`. #[derive(Encode, Decode, Default, Clone, Copy, TypeInfo)] #[cfg_attr(test, derive(PartialEq, RuntimeDebug))] -pub struct CoreAffinityCount { - core_idx: CoreIndex, +struct CoreAffinityCount { + core_index: CoreIndex, count: u32, } /// An indicator as to which end of the `OnDemandQueue` an assignment will be placed. -pub enum QueuePushDirection { +#[cfg_attr(test, derive(RuntimeDebug))] +enum QueuePushDirection { Back, Front, } @@ -93,9 +206,8 @@ type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; /// Errors that can happen during spot traffic calculation. -#[derive(PartialEq)] -#[cfg_attr(feature = "std", derive(Debug))] -pub enum SpotTrafficCalculationErr { +#[derive(PartialEq, RuntimeDebug)] +enum SpotTrafficCalculationErr { /// The order queue capacity is at 0. QueueCapacityIsZero, /// The queue size is larger than the queue capacity. @@ -104,15 +216,85 @@ pub enum SpotTrafficCalculationErr { Division, } +/// Type used for priority indices. +// NOTE: The `Ord` implementation for this type is unsound in the general case. +// Do not use it for anything but it's intended purpose. +#[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone, Eq, Copy)] +struct QueueIndex(u32); + +/// QueueIndex with reverse ordering. +/// +/// Same as `Reverse(QueueIndex)`, but with all the needed traits implemented. +#[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone, Eq, Copy)] +struct ReverseQueueIndex(u32); + +impl QueueIndex { + fn reverse(self) -> ReverseQueueIndex { + ReverseQueueIndex(self.0) + } +} + +impl Ord for QueueIndex { + fn cmp(&self, other: &Self) -> Ordering { + let diff = self.0.overflowing_sub(other.0).0; + if diff == 0 { + Ordering::Equal + } else if diff <= ON_DEMAND_MAX_QUEUE_MAX_SIZE { + Ordering::Greater + } else { + Ordering::Less + } + } +} + +impl PartialOrd for QueueIndex { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for ReverseQueueIndex { + fn cmp(&self, other: &Self) -> Ordering { + QueueIndex(other.0).cmp(&QueueIndex(self.0)) + } +} +impl PartialOrd for ReverseQueueIndex { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(&other)) + } +} + /// Internal representation of an order after it has been enqueued already. 
-#[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone)] -pub(super) struct EnqueuedOrder { - pub para_id: ParaId, +/// +/// This data structure is provided for a min BinaryHeap (Ord compares in reverse order with regards +/// to its elements) +#[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone, Eq)] +struct EnqueuedOrder { + para_id: ParaId, + idx: QueueIndex, } impl EnqueuedOrder { - pub fn new(para_id: ParaId) -> Self { - Self { para_id } + fn new(idx: QueueIndex, para_id: ParaId) -> Self { + Self { idx, para_id } + } +} + +impl PartialOrd for EnqueuedOrder { + fn partial_cmp(&self, other: &Self) -> Option { + match other.idx.partial_cmp(&self.idx) { + Some(Ordering::Equal) => other.para_id.partial_cmp(&self.para_id), + o => o, + } + } +} + +impl Ord for EnqueuedOrder { + fn cmp(&self, other: &Self) -> Ordering { + match other.idx.cmp(&self.idx) { + Ordering::Equal => other.para_id.cmp(&self.para_id), + o => o, + } } } @@ -121,8 +303,11 @@ pub mod pallet { use super::*; + const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); + #[pallet::pallet] #[pallet::without_storage_info] + #[pallet::storage_version(STORAGE_VERSION)] pub struct Pallet(_); #[pallet::config] @@ -141,36 +326,44 @@ pub mod pallet { type TrafficDefaultValue: Get; } - /// Creates an empty spot traffic value if one isn't present in storage already. + /// Creates an empty queue status for an empty queue with initial traffic value. #[pallet::type_value] - pub fn SpotTrafficOnEmpty() -> FixedU128 { - T::TrafficDefaultValue::get() + pub(super) fn QueueStatusOnEmpty() -> QueueStatusType { + QueueStatusType { traffic: T::TrafficDefaultValue::get(), ..Default::default() } } - /// Creates an empty on demand queue if one isn't present in storage already. #[pallet::type_value] - pub(super) fn OnDemandQueueOnEmpty() -> VecDeque { - VecDeque::new() + pub(super) fn EntriesOnEmpty() -> BinaryHeap { + BinaryHeap::new() } - /// Keeps track of the multiplier used to calculate the current spot price for the on demand - /// assigner. - #[pallet::storage] - pub(super) type SpotTraffic = - StorageValue<_, FixedU128, ValueQuery, SpotTrafficOnEmpty>; - - /// The order storage entry. Uses a VecDeque to be able to push to the front of the - /// queue from the scheduler on session boundaries. - #[pallet::storage] - pub(super) type OnDemandQueue = - StorageValue<_, VecDeque, ValueQuery, OnDemandQueueOnEmpty>; - /// Maps a `ParaId` to `CoreIndex` and keeps track of how many assignments the scheduler has in /// it's lookahead. Keeping track of this affinity prevents parallel execution of the same /// `ParaId` on two or more `CoreIndex`es. #[pallet::storage] pub(super) type ParaIdAffinity = - StorageMap<_, Twox256, ParaId, CoreAffinityCount, OptionQuery>; + StorageMap<_, Twox64Concat, ParaId, CoreAffinityCount, OptionQuery>; + + /// Overall status of queue (both free + affinity entries) + #[pallet::storage] + pub(super) type QueueStatus = + StorageValue<_, QueueStatusType, ValueQuery, QueueStatusOnEmpty>; + + /// Priority queue for all orders which don't yet (or not any more) have any core affinity. + #[pallet::storage] + pub(super) type FreeEntries = + StorageValue<_, BinaryHeap, ValueQuery, EntriesOnEmpty>; + + /// Queue entries that are currently bound to a particular core due to core affinity. 
+ #[pallet::storage] + pub(super) type AffinityEntries = StorageMap< + _, + Twox64Concat, + CoreIndex, + BinaryHeap, + ValueQuery, + EntriesOnEmpty, + >; #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] @@ -183,9 +376,6 @@ pub mod pallet { #[pallet::error] pub enum Error { - /// The `ParaId` supplied to the `place_order` call is not a valid `ParaThread`, making the - /// call is invalid. - InvalidParaId, /// The order queue is full, `place_order` will not continue. QueueFull, /// The current spot price is higher than the max amount specified in the `place_order` @@ -197,45 +387,14 @@ pub mod pallet { impl Hooks> for Pallet { fn on_initialize(_now: BlockNumberFor) -> Weight { let config = >::config(); - // Calculate spot price multiplier and store it. - let old_traffic = SpotTraffic::::get(); - match Self::calculate_spot_traffic( - old_traffic, - config.scheduler_params.on_demand_queue_max_size, - Self::queue_size(), - config.scheduler_params.on_demand_target_queue_utilization, - config.scheduler_params.on_demand_fee_variability, - ) { - Ok(new_traffic) => { - // Only update storage on change - if new_traffic != old_traffic { - SpotTraffic::::set(new_traffic); - Pallet::::deposit_event(Event::::SpotTrafficSet { - traffic: new_traffic, - }); - return T::DbWeight::get().reads_writes(2, 1) - } - }, - Err(SpotTrafficCalculationErr::QueueCapacityIsZero) => { - log::debug!( - target: LOG_TARGET, - "Error calculating spot traffic: The order queue capacity is at 0." - ); - }, - Err(SpotTrafficCalculationErr::QueueSizeLargerThanCapacity) => { - log::debug!( - target: LOG_TARGET, - "Error calculating spot traffic: The queue size is larger than the queue capacity." - ); - }, - Err(SpotTrafficCalculationErr::Division) => { - log::debug!( - target: LOG_TARGET, - "Error calculating spot traffic: Arithmetic error during division, either division by 0 or over/underflow." - ); - }, - }; - T::DbWeight::get().reads_writes(2, 0) + // We need to update the spot traffic on block initialize in order to account for idle + // blocks. + QueueStatus::::mutate(|queue_status| { + Self::update_spot_traffic(&config, queue_status); + }); + + // 2 reads in config and queuestatus, at maximum 1 write to queuestatus. + T::DbWeight::get().reads_writes(2, 1) } } @@ -258,7 +417,7 @@ pub mod pallet { /// Events: /// - `SpotOrderPlaced` #[pallet::call_index(0)] - #[pallet::weight(::WeightInfo::place_order_allow_death(OnDemandQueue::::get().len() as u32))] + #[pallet::weight(::WeightInfo::place_order_allow_death(QueueStatus::::get().size()))] pub fn place_order_allow_death( origin: OriginFor, max_amount: BalanceOf, @@ -285,7 +444,7 @@ pub mod pallet { /// Events: /// - `SpotOrderPlaced` #[pallet::call_index(1)] - #[pallet::weight(::WeightInfo::place_order_keep_alive(OnDemandQueue::::get().len() as u32))] + #[pallet::weight(::WeightInfo::place_order_keep_alive(QueueStatus::::get().size()))] pub fn place_order_keep_alive( origin: OriginFor, max_amount: BalanceOf, @@ -297,10 +456,78 @@ pub mod pallet { } } +// Internal functions and interface to scheduler/wrapping assignment provider. impl Pallet where BalanceOf: FixedPointOperand, { + /// Take the next queued entry that is available for a given core index. 
+ /// + /// Parameters: + /// - `core_index`: The core index + pub fn pop_assignment_for_core(core_index: CoreIndex) -> Option { + let entry: Result = QueueStatus::::try_mutate(|queue_status| { + AffinityEntries::::try_mutate(core_index, |affinity_entries| { + let free_entry = FreeEntries::::try_mutate(|free_entries| { + let affinity_next = affinity_entries.peek(); + let free_next = free_entries.peek(); + let pick_free = match (affinity_next, free_next) { + (None, _) => true, + (Some(_), None) => false, + (Some(a), Some(f)) => f < a, + }; + if pick_free { + let entry = free_entries.pop().ok_or(())?; + let (mut affinities, free): (BinaryHeap<_>, BinaryHeap<_>) = + take(free_entries) + .into_iter() + .partition(|e| e.para_id == entry.para_id); + affinity_entries.append(&mut affinities); + *free_entries = free; + Ok(entry) + } else { + Err(()) + } + }); + let entry = free_entry.or_else(|()| affinity_entries.pop().ok_or(()))?; + queue_status.consume_index(entry.idx); + Ok(entry) + }) + }); + + let assignment = entry.map(|e| Assignment::Pool { para_id: e.para_id, core_index }).ok()?; + + Pallet::::increase_affinity(assignment.para_id(), core_index); + Some(assignment) + } + + /// Report that the `para_id` & `core_index` combination was processed. + /// + /// This should be called once it is clear that the assignment won't get pushed back anymore. + /// + /// In other words for each `pop_assignment_for_core` a call to this function or + /// `push_back_assignment` must follow, but only one. + pub fn report_processed(para_id: ParaId, core_index: CoreIndex) { + Pallet::::decrease_affinity_update_queue(para_id, core_index); + } + + /// Push an assignment back to the front of the queue. + /// + /// The assignment has not been processed yet. Typically used on session boundaries. + /// + /// NOTE: We are not checking queue size here. So due to push backs it is possible that we + /// exceed the maximum queue size slightly. + /// + /// Parameters: + /// - `para_id`: The para that did not make it. + /// - `core_index`: The core the para was scheduled on. + pub fn push_back_assignment(para_id: ParaId, core_index: CoreIndex) { + Pallet::::decrease_affinity_update_queue(para_id, core_index); + QueueStatus::::mutate(|queue_status| { + Pallet::::add_on_demand_order(queue_status, para_id, QueuePushDirection::Front); + }); + } + /// Helper function for `place_order_*` calls. Used to differentiate between placing orders /// with a keep alive check or to allow the account to be reaped. 
/// @@ -326,34 +553,62 @@ where ) -> DispatchResult { let config = >::config(); - // Traffic always falls back to 1.0 - let traffic = SpotTraffic::::get(); - - // Calculate spot price - let spot_price: BalanceOf = traffic.saturating_mul_int( - config.scheduler_params.on_demand_base_fee.saturated_into::>(), - ); - - // Is the current price higher than `max_amount` - ensure!(spot_price.le(&max_amount), Error::::SpotPriceHigherThanMaxAmount); + QueueStatus::::mutate(|queue_status| { + Self::update_spot_traffic(&config, queue_status); + let traffic = queue_status.traffic; - // Charge the sending account the spot price - let _ = T::Currency::withdraw( - &sender, - spot_price, - WithdrawReasons::FEE, - existence_requirement, - )?; + // Calculate spot price + let spot_price: BalanceOf = traffic.saturating_mul_int( + config.scheduler_params.on_demand_base_fee.saturated_into::>(), + ); - let order = EnqueuedOrder::new(para_id); + // Is the current price higher than `max_amount` + ensure!(spot_price.le(&max_amount), Error::::SpotPriceHigherThanMaxAmount); - let res = Pallet::::add_on_demand_order(order, QueuePushDirection::Back); + // Charge the sending account the spot price + let _ = T::Currency::withdraw( + &sender, + spot_price, + WithdrawReasons::FEE, + existence_requirement, + )?; - if res.is_ok() { - Pallet::::deposit_event(Event::::OnDemandOrderPlaced { para_id, spot_price }); - } + ensure!( + queue_status.size() < config.scheduler_params.on_demand_queue_max_size, + Error::::QueueFull + ); + Pallet::::add_on_demand_order(queue_status, para_id, QueuePushDirection::Back); + Ok(()) + }) + } - res + /// Calculate and update spot traffic. + fn update_spot_traffic( + config: &configuration::HostConfiguration>, + queue_status: &mut QueueStatusType, + ) { + let old_traffic = queue_status.traffic; + match Self::calculate_spot_traffic( + old_traffic, + config.scheduler_params.on_demand_queue_max_size, + queue_status.size(), + config.scheduler_params.on_demand_target_queue_utilization, + config.scheduler_params.on_demand_fee_variability, + ) { + Ok(new_traffic) => { + // Only update storage on change + if new_traffic != old_traffic { + queue_status.traffic = new_traffic; + Pallet::::deposit_event(Event::::SpotTrafficSet { traffic: new_traffic }); + } + }, + Err(err) => { + log::debug!( + target: LOG_TARGET, + "Error calculating spot traffic: {:?}", err + ); + }, + }; } /// The spot price multiplier. This is based on the transaction fee calculations defined in: @@ -378,7 +633,7 @@ where /// - `SpotTrafficCalculationErr::QueueCapacityIsZero` /// - `SpotTrafficCalculationErr::QueueSizeLargerThanCapacity` /// - `SpotTrafficCalculationErr::Division` - pub(crate) fn calculate_spot_traffic( + fn calculate_spot_traffic( traffic: FixedU128, queue_capacity: u32, queue_size: u32, @@ -430,175 +685,140 @@ where /// Adds an order to the on demand queue. /// /// Paramenters: - /// - `order`: The `EnqueuedOrder` to add to the queue. /// - `location`: Whether to push this entry to the back or the front of the queue. Pushing an /// entry to the front of the queue is only used when the scheduler wants to push back an /// entry it has already popped. - /// Returns: - /// - The unit type on success. - /// - /// Errors: - /// - `InvalidParaId` - /// - `QueueFull` fn add_on_demand_order( - order: EnqueuedOrder, + queue_status: &mut QueueStatusType, + para_id: ParaId, location: QueuePushDirection, - ) -> Result<(), DispatchError> { - // Only parathreads are valid paraids for on the go parachains. 
- ensure!(<paras::Pallet<T>>::is_parathread(order.para_id), Error::<T>::InvalidParaId);
-
- let config = <configuration::Pallet<T>>::config();
-
- OnDemandQueue::<T>::try_mutate(|queue| {
- // Abort transaction if queue is too large
- ensure!(
- Self::queue_size() < config.scheduler_params.on_demand_queue_max_size,
- Error::<T>::QueueFull
- );
- match location {
- QueuePushDirection::Back => queue.push_back(order),
- QueuePushDirection::Front => queue.push_front(order),
- };
- Ok(())
- })
+ ) {
+ let idx = match location {
+ QueuePushDirection::Back => queue_status.push_back(),
+ QueuePushDirection::Front => queue_status.push_front(),
+ };
+
+ let affinity = ParaIdAffinity::<T>::get(para_id);
+ let order = EnqueuedOrder::new(idx, para_id);
+ #[cfg(test)]
+ log::debug!(target: LOG_TARGET, "add_on_demand_order, order: {:?}, affinity: {:?}, direction: {:?}", order, affinity, location);
+
+ match affinity {
+ None => FreeEntries::<T>::mutate(|entries| entries.push(order)),
+ Some(affinity) =>
+ AffinityEntries::<T>::mutate(affinity.core_index, |entries| entries.push(order)),
+ }
 }
 
- /// Get the size of the on demand queue.
+ /// Decrease core affinity for a para and update the queue.
 ///
- /// Returns:
- /// - The size of the on demand queue.
- fn queue_size() -> u32 {
- let config = <configuration::Pallet<T>>::config();
- match OnDemandQueue::<T>::get().len().try_into() {
- Ok(size) => return size,
- Err(_) => {
- log::debug!(
- target: LOG_TARGET,
- "Failed to fetch the on demand queue size, returning the max size."
- );
- return config.scheduler_params.on_demand_queue_max_size
- },
+ /// If the affinity dropped to 0, entries are moved back to `FreeEntries`.
+ fn decrease_affinity_update_queue(para_id: ParaId, core_index: CoreIndex) {
+ let affinity = Pallet::<T>::decrease_affinity(para_id, core_index);
+ #[cfg(not(test))]
+ debug_assert_ne!(
+ affinity, None,
+ "Decreased affinity for a para that has not been served on a core?"
+ );
+ if affinity != Some(0) {
+ return
 }
- }
-
- /// Getter for the order queue.
- #[cfg(test)]
- fn get_queue() -> VecDeque<EnqueuedOrder> {
- OnDemandQueue::<T>::get()
- }
-
- /// Getter for the affinity tracker.
- pub fn get_affinity_map(para_id: ParaId) -> Option<CoreAffinityCount> {
- ParaIdAffinity::<T>::get(para_id)
+ // No more affinity for entries on this core, so free any entries:
+ //
+ // This is necessary to ensure they keep being served, as the core might no longer exist
+ // at all.
+ AffinityEntries::<T>::mutate(core_index, |affinity_entries| {
+ FreeEntries::<T>::mutate(|free_entries| {
+ let (mut freed, affinities): (BinaryHeap<_>, BinaryHeap<_>) =
+ take(affinity_entries).into_iter().partition(|e| e.para_id == para_id);
+ free_entries.append(&mut freed);
+ *affinity_entries = affinities;
+ })
+ });
 }
 
 /// Decreases the affinity of a `ParaId` to a specified `CoreIndex`.
- /// Subtracts from the count of the `CoreAffinityCount` if an entry is found and the core_idx
+ ///
+ /// Subtracts from the count of the `CoreAffinityCount` if an entry is found and the core_index
 /// matches. When the count reaches 0, the entry is removed.
 /// A non-existent entry is a no-op.
- fn decrease_affinity(para_id: ParaId, core_idx: CoreIndex) {
+ ///
+ /// Returns: The new affinity of the para on that core. `None` if there is no affinity on this
+ /// core.
+ fn decrease_affinity(para_id: ParaId, core_index: CoreIndex) -> Option<u32> {
 ParaIdAffinity::<T>::mutate(para_id, |maybe_affinity| {
- if let Some(affinity) = maybe_affinity {
- if affinity.core_idx == core_idx {
- let new_count = affinity.count.saturating_sub(1);
- if new_count > 0 {
- *maybe_affinity = Some(CoreAffinityCount { core_idx, count: new_count });
- } else {
- *maybe_affinity = None;
- }
+ let affinity = maybe_affinity.as_mut()?;
+ if affinity.core_index == core_index {
+ let new_count = affinity.count.saturating_sub(1);
+ if new_count > 0 {
+ *maybe_affinity = Some(CoreAffinityCount { core_index, count: new_count });
+ } else {
+ *maybe_affinity = None;
 }
+ return Some(new_count)
+ } else {
+ None
 }
- });
+ })
 }
 
 /// Increases the affinity of a `ParaId` to a specified `CoreIndex`.
- /// Adds to the count of the `CoreAffinityCount` if an entry is found and the core_idx matches.
- /// A non-existant entry will be initialized with a count of 1 and uses the supplied
+ /// Adds to the count of the `CoreAffinityCount` if an entry is found and the core_index
+ /// matches. A non-existent entry will be initialized with a count of 1 and uses the supplied
 /// `CoreIndex`.
- fn increase_affinity(para_id: ParaId, core_idx: CoreIndex) {
+ fn increase_affinity(para_id: ParaId, core_index: CoreIndex) {
 ParaIdAffinity::<T>::mutate(para_id, |maybe_affinity| match maybe_affinity {
 Some(affinity) =>
- if affinity.core_idx == core_idx {
+ if affinity.core_index == core_index {
 *maybe_affinity = Some(CoreAffinityCount {
- core_idx,
+ core_index,
 count: affinity.count.saturating_add(1),
 });
 },
 None => {
- *maybe_affinity = Some(CoreAffinityCount { core_idx, count: 1 });
+ *maybe_affinity = Some(CoreAffinityCount { core_index, count: 1 });
 },
 })
 }
-}
 
-impl<T: Config> Pallet<T> {
- /// Take the next queued entry that is available for a given core index.
- /// Invalidates and removes orders with a `para_id` that is not `ParaLifecycle::Parathread`
- /// but only in [0..P] range slice of the order queue, where P is the element that is
- /// removed from the order queue.
- ///
- /// Parameters:
- /// - `core_idx`: The core index
- pub fn pop_assignment_for_core(core_idx: CoreIndex) -> Option<Assignment> {
- let mut queue: VecDeque<EnqueuedOrder> = OnDemandQueue::<T>::get();
-
- let mut invalidated_para_id_indexes: Vec<usize> = vec![];
-
- // Get the position of the next `ParaId`. Select either a valid `ParaId` that has an
- // affinity to the same `CoreIndex` as the scheduler asks for or a valid `ParaId` with no
- // affinity at all.
- let pos = queue.iter().enumerate().position(|(index, assignment)| {
- if <paras::Pallet<T>>::is_parathread(assignment.para_id) {
- match ParaIdAffinity::<T>::get(&assignment.para_id) {
- Some(affinity) => return affinity.core_idx == core_idx,
- None => return true,
- }
- }
- // Record no longer valid para_ids.
- invalidated_para_id_indexes.push(index);
- return false
- });
+ /// Getter for the affinity tracker.
+ #[cfg(test)]
+ fn get_affinity_map(para_id: ParaId) -> Option<CoreAffinityCount> {
+ ParaIdAffinity::<T>::get(para_id)
+ }
 
- // Collect the popped value.
- let popped = pos.and_then(|p: usize| {
- if let Some(assignment) = queue.remove(p) {
- Pallet::<T>::increase_affinity(assignment.para_id, core_idx);
- return Some(assignment)
- };
- None
- });
+ /// Getter for the affinity entries.
+ #[cfg(test)]
+ fn get_affinity_entries(core_index: CoreIndex) -> BinaryHeap<EnqueuedOrder> {
+ AffinityEntries::<T>::get(core_index)
+ }
 
- // Only remove the invalid indexes *after* using the index.
- // Removed in reverse order so that the indexes don't shift.
- invalidated_para_id_indexes.iter().rev().for_each(|idx| {
- queue.remove(*idx);
- });
+ /// Getter for the free entries.
+ #[cfg(test)]
+ fn get_free_entries() -> BinaryHeap<EnqueuedOrder> {
+ FreeEntries::<T>::get()
+ }
 
- // Write changes to storage.
- OnDemandQueue::<T>::set(queue);
+ #[cfg(feature = "runtime-benchmarks")]
+ pub fn populate_queue(para_id: ParaId, num: u32) {
+ QueueStatus::<T>::mutate(|queue_status| {
+ for _ in 0..num {
+ Pallet::<T>::add_on_demand_order(queue_status, para_id, QueuePushDirection::Back);
+ }
+ });
+ }
 
- popped.map(|p| Assignment::Pool { para_id: p.para_id, core_index: core_idx })
+ #[cfg(test)]
+ fn set_queue_status(new_status: QueueStatusType) {
+ QueueStatus::<T>::set(new_status);
 }
 
- /// Report that the `para_id` & `core_index` combination was processed.
- pub fn report_processed(para_id: ParaId, core_index: CoreIndex) {
- Pallet::<T>::decrease_affinity(para_id, core_index)
+ #[cfg(test)]
+ fn get_queue_status() -> QueueStatusType {
+ QueueStatus::<T>::get()
 }
 
- /// Push an assignment back to the front of the queue.
- ///
- /// The assignment has not been processed yet. Typically used on session boundaries.
- /// Parameters:
- /// - `assignment`: The on demand assignment.
- pub fn push_back_assignment(para_id: ParaId, core_index: CoreIndex) {
- Pallet::<T>::decrease_affinity(para_id, core_index);
- // Skip the queue on push backs from scheduler
- match Pallet::<T>::add_on_demand_order(
- EnqueuedOrder::new(para_id),
- QueuePushDirection::Front,
- ) {
- Ok(_) => {},
- Err(_) => {},
- }
+ #[cfg(test)]
+ fn get_traffic_default_value() -> FixedU128 {
+ <T as Config>::TrafficDefaultValue::get()
 }
 }
 
diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs b/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs
index 8404700780c..982efe77b93 100644
--- a/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs
+++ b/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs
@@ -73,11 +73,24 @@ fn run_to_block(
 Paras::initializer_initialize(b + 1);
 Scheduler::initializer_initialize(b + 1);
 
+ // We need to update the spot traffic on every block.
+ OnDemandAssigner::on_initialize(b + 1);
+
 // In the real runtime this is expected to be called by the `InclusionInherent` pallet.
 Scheduler::free_cores_and_fill_claimqueue(BTreeMap::new(), b + 1);
 }
 }
 
+fn place_order(para_id: ParaId) {
+ let alice = 100u64;
+ let amt = 10_000_000u128;
+
+ Balances::make_free_balance_be(&alice, amt);
+
+ run_to_block(101, |n| if n == 101 { Some(Default::default()) } else { None });
+ OnDemandAssigner::place_order_allow_death(RuntimeOrigin::signed(alice), amt, para_id).unwrap()
+}
+
 #[test]
 fn spot_traffic_capacity_zero_returns_none() {
 match OnDemandAssigner::calculate_spot_traffic(
@@ -201,6 +214,42 @@ fn spot_traffic_decreases_over_time() {
 assert_eq!(traffic, FixedU128::from_inner(3_125_000_000_000_000_000u128))
 }
 
+#[test]
+fn spot_traffic_decreases_between_idle_blocks() {
+ // Testing spot traffic assumptions, but using the mock runtime and default on demand
+ // configuration values. Ensuring that blocks with no on demand activity (idle)
+ // decrease traffic.
+
+ let para_id = ParaId::from(111);
+
+ new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| {
+ // Initialize the parathread and wait for it to be ready.
+ schedule_blank_para(para_id, ParaKind::Parathread); + assert!(!Paras::is_parathread(para_id)); + run_to_block(100, |n| if n == 100 { Some(Default::default()) } else { None }); + assert!(Paras::is_parathread(para_id)); + + // Set the spot traffic to a large number + OnDemandAssigner::set_queue_status(QueueStatusType { + traffic: FixedU128::from_u32(10), + ..Default::default() + }); + + assert_eq!(OnDemandAssigner::get_queue_status().traffic, FixedU128::from_u32(10)); + + // Run to block 101 and ensure that the traffic decreases. + run_to_block(101, |n| if n == 100 { Some(Default::default()) } else { None }); + assert!(OnDemandAssigner::get_queue_status().traffic < FixedU128::from_u32(10)); + + // Run to block 102 and observe that we've hit the default traffic value. + run_to_block(102, |n| if n == 100 { Some(Default::default()) } else { None }); + assert_eq!( + OnDemandAssigner::get_queue_status().traffic, + OnDemandAssigner::get_traffic_default_value() + ); + }) +} + #[test] fn place_order_works() { let alice = 1u64; @@ -278,74 +327,6 @@ fn place_order_keep_alive_keeps_alive() { }); } -#[test] -fn add_on_demand_order_works() { - let para_a = ParaId::from(111); - let order = EnqueuedOrder::new(para_a); - - let mut genesis = GenesisConfigBuilder::default(); - genesis.on_demand_max_queue_size = 1; - new_test_ext(genesis.build()).execute_with(|| { - // Initialize the parathread and wait for it to be ready. - schedule_blank_para(para_a, ParaKind::Parathread); - - // `para_a` is not onboarded as a parathread yet. - assert_noop!( - OnDemandAssigner::add_on_demand_order(order.clone(), QueuePushDirection::Back), - Error::::InvalidParaId - ); - - assert!(!Paras::is_parathread(para_a)); - run_to_block(100, |n| if n == 100 { Some(Default::default()) } else { None }); - assert!(Paras::is_parathread(para_a)); - - // `para_a` is now onboarded as a valid parathread. - assert_ok!(OnDemandAssigner::add_on_demand_order(order.clone(), QueuePushDirection::Back)); - - // Max queue size is 1, queue should be full. 
- assert_noop!( - OnDemandAssigner::add_on_demand_order(order, QueuePushDirection::Back), - Error::::QueueFull - ); - }); -} - -#[test] -fn spotqueue_push_directions() { - new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { - let para_a = ParaId::from(111); - let para_b = ParaId::from(222); - let para_c = ParaId::from(333); - - schedule_blank_para(para_a, ParaKind::Parathread); - schedule_blank_para(para_b, ParaKind::Parathread); - schedule_blank_para(para_c, ParaKind::Parathread); - - run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); - - let order_a = EnqueuedOrder::new(para_a); - let order_b = EnqueuedOrder::new(para_b); - let order_c = EnqueuedOrder::new(para_c); - - assert_ok!(OnDemandAssigner::add_on_demand_order( - order_a.clone(), - QueuePushDirection::Front - )); - assert_ok!(OnDemandAssigner::add_on_demand_order( - order_b.clone(), - QueuePushDirection::Front - )); - - assert_ok!(OnDemandAssigner::add_on_demand_order( - order_c.clone(), - QueuePushDirection::Back - )); - - assert_eq!(OnDemandAssigner::queue_size(), 3); - assert_eq!(OnDemandAssigner::get_queue(), VecDeque::from(vec![order_b, order_a, order_c])) - }); -} - #[test] fn pop_assignment_for_core_works() { new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { @@ -356,51 +337,32 @@ fn pop_assignment_for_core_works() { run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); - let order_a = EnqueuedOrder::new(para_a); - let order_b = EnqueuedOrder::new(para_b); - let assignment_a = Assignment::Pool { para_id: para_a, core_index: CoreIndex(0) }; - let assignment_b = Assignment::Pool { para_id: para_b, core_index: CoreIndex(1) }; - // Pop should return none with empty queue assert_eq!(OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)), None); // Add enough assignments to the order queue. 
for _ in 0..2 { - OnDemandAssigner::add_on_demand_order(order_a.clone(), QueuePushDirection::Back) - .expect("Invalid paraid or queue full"); - - OnDemandAssigner::add_on_demand_order(order_b.clone(), QueuePushDirection::Back) - .expect("Invalid paraid or queue full"); - } - - // Queue should contain orders a, b, a, b - { - let queue: Vec = OnDemandQueue::::get().into_iter().collect(); - assert_eq!( - queue, - vec![order_a.clone(), order_b.clone(), order_a.clone(), order_b.clone()] - ); + place_order(para_a); + place_order(para_b); } // Popped assignments should be for the correct paras and cores assert_eq!( - OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)), - Some(assignment_a.clone()) + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)).map(|a| a.para_id()), + Some(para_a) ); assert_eq!( - OnDemandAssigner::pop_assignment_for_core(CoreIndex(1)), - Some(assignment_b.clone()) + OnDemandAssigner::pop_assignment_for_core(CoreIndex(1)).map(|a| a.para_id()), + Some(para_b) ); assert_eq!( - OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)), - Some(assignment_a.clone()) + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)).map(|a| a.para_id()), + Some(para_a) + ); + assert_eq!( + OnDemandAssigner::pop_assignment_for_core(CoreIndex(1)).map(|a| a.para_id()), + Some(para_b) ); - - // Queue should contain one left over order - { - let queue: Vec = OnDemandQueue::::get().into_iter().collect(); - assert_eq!(queue, vec![order_b.clone(),]); - } }); } @@ -414,28 +376,19 @@ fn push_back_assignment_works() { run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); - let order_a = EnqueuedOrder::new(para_a); - let order_b = EnqueuedOrder::new(para_b); - // Add enough assignments to the order queue. - OnDemandAssigner::add_on_demand_order(order_a.clone(), QueuePushDirection::Back) - .expect("Invalid paraid or queue full"); - - OnDemandAssigner::add_on_demand_order(order_b.clone(), QueuePushDirection::Back) - .expect("Invalid paraid or queue full"); + place_order(para_a); + place_order(para_b); // Pop order a - OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)); + assert_eq!( + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)).unwrap().para_id(), + para_a + ); // Para a should have affinity for core 0 assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 1); - assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().core_idx, CoreIndex(0)); - - // Queue should still contain order b - { - let queue: Vec = OnDemandQueue::::get().into_iter().collect(); - assert_eq!(queue, vec![order_b.clone()]); - } + assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().core_index, CoreIndex(0)); // Push back order a OnDemandAssigner::push_back_assignment(para_a, CoreIndex(0)); @@ -444,10 +397,82 @@ fn push_back_assignment_works() { assert_eq!(OnDemandAssigner::get_affinity_map(para_a).is_none(), true); // Queue should contain orders a, b. A in front of b. 
- { - let queue: Vec = OnDemandQueue::::get().into_iter().collect(); - assert_eq!(queue, vec![order_a.clone(), order_b.clone()]); + assert_eq!( + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)).unwrap().para_id(), + para_a + ); + assert_eq!( + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)).unwrap().para_id(), + para_b + ); + }); +} + +#[test] +fn affinity_prohibits_parallel_scheduling() { + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + let para_a = ParaId::from(111); + let para_b = ParaId::from(222); + + schedule_blank_para(para_a, ParaKind::Parathread); + schedule_blank_para(para_b, ParaKind::Parathread); + + run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); + + // There should be no affinity before starting. + assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); + assert!(OnDemandAssigner::get_affinity_map(para_b).is_none()); + + // Add 2 assignments for para_a for every para_b. + place_order(para_a); + place_order(para_a); + place_order(para_b); + + // Approximate having 1 core. + for _ in 0..3 { + assert!(OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)).is_some()); } + assert!(OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)).is_none()); + + // Affinity on one core is meaningless. + assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 2); + assert_eq!(OnDemandAssigner::get_affinity_map(para_b).unwrap().count, 1); + assert_eq!( + OnDemandAssigner::get_affinity_map(para_a).unwrap().core_index, + OnDemandAssigner::get_affinity_map(para_b).unwrap().core_index, + ); + + // Clear affinity + OnDemandAssigner::report_processed(para_a, 0.into()); + OnDemandAssigner::report_processed(para_a, 0.into()); + OnDemandAssigner::report_processed(para_b, 0.into()); + + // Add 2 assignments for para_a for every para_b. + place_order(para_a); + place_order(para_a); + place_order(para_b); + + // Approximate having 3 cores. CoreIndex 2 should be unable to obtain an assignment + for _ in 0..3 { + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)); + OnDemandAssigner::pop_assignment_for_core(CoreIndex(1)); + assert!(OnDemandAssigner::pop_assignment_for_core(CoreIndex(2)).is_none()); + } + + // Affinity should be the same as before, but on different cores. + assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 2); + assert_eq!(OnDemandAssigner::get_affinity_map(para_b).unwrap().count, 1); + assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().core_index, CoreIndex(0)); + assert_eq!(OnDemandAssigner::get_affinity_map(para_b).unwrap().core_index, CoreIndex(1)); + + // Clear affinity + OnDemandAssigner::report_processed(para_a, CoreIndex(0)); + OnDemandAssigner::report_processed(para_a, CoreIndex(0)); + OnDemandAssigner::report_processed(para_b, CoreIndex(1)); + + // There should be no affinity after clearing. + assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); + assert!(OnDemandAssigner::get_affinity_map(para_b).is_none()); }); } @@ -458,7 +483,6 @@ fn affinity_changes_work() { let core_index = CoreIndex(0); schedule_blank_para(para_a, ParaKind::Parathread); - let order_a = EnqueuedOrder::new(para_a); run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); // There should be no affinity before starting. @@ -466,8 +490,7 @@ fn affinity_changes_work() { // Add enough assignments to the order queue. 
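+		// NOTE: unlike the old direct `add_on_demand_order` calls, `place_order` goes through
+		// the full extrinsic path, so the current spot price is charged for every order placed.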
for _ in 0..10 { - OnDemandAssigner::add_on_demand_order(order_a.clone(), QueuePushDirection::Front) - .expect("Invalid paraid or queue full"); + place_order(para_a); } // There should be no affinity before the scheduler pops. @@ -483,7 +506,6 @@ fn affinity_changes_work() { // Affinity count is 1 after popping with a previous para. assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 1); - assert_eq!(OnDemandAssigner::queue_size(), 8); for _ in 0..3 { OnDemandAssigner::pop_assignment_for_core(core_index); @@ -491,147 +513,197 @@ fn affinity_changes_work() { // Affinity count is 4 after popping 3 times without a previous para. assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 4); - assert_eq!(OnDemandAssigner::queue_size(), 5); for _ in 0..5 { OnDemandAssigner::report_processed(para_a, 0.into()); - OnDemandAssigner::pop_assignment_for_core(core_index); + assert!(OnDemandAssigner::pop_assignment_for_core(core_index).is_some()); } // Affinity count should still be 4 but queue should be empty. + assert!(OnDemandAssigner::pop_assignment_for_core(core_index).is_none()); assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 4); - assert_eq!(OnDemandAssigner::queue_size(), 0); // Pop 4 times and get to exactly 0 (None) affinity. for _ in 0..4 { OnDemandAssigner::report_processed(para_a, 0.into()); - OnDemandAssigner::pop_assignment_for_core(core_index); + assert!(OnDemandAssigner::pop_assignment_for_core(core_index).is_none()); } assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); // Decreasing affinity beyond 0 should still be None. OnDemandAssigner::report_processed(para_a, 0.into()); - OnDemandAssigner::pop_assignment_for_core(core_index); + assert!(OnDemandAssigner::pop_assignment_for_core(core_index).is_none()); assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); }); } #[test] -fn affinity_prohibits_parallel_scheduling() { - new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { - let para_a = ParaId::from(111); - let para_b = ParaId::from(222); +fn new_affinity_for_a_core_must_come_from_free_entries() { + // If affinity count for a core was zero before, and is 1 now, then the entry + // must have come from free_entries. + let parachains = + vec![ParaId::from(111), ParaId::from(222), ParaId::from(333), ParaId::from(444)]; + let core_indices = vec![CoreIndex(0), CoreIndex(1), CoreIndex(2), CoreIndex(3)]; - schedule_blank_para(para_a, ParaKind::Parathread); - schedule_blank_para(para_b, ParaKind::Parathread); + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + parachains.iter().for_each(|chain| { + schedule_blank_para(*chain, ParaKind::Parathread); + }); run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); - let order_a = EnqueuedOrder::new(para_a); - let order_b = EnqueuedOrder::new(para_b); - - // There should be no affinity before starting. - assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); - assert!(OnDemandAssigner::get_affinity_map(para_b).is_none()); - - // Add 2 assignments for para_a for every para_b. 
- OnDemandAssigner::add_on_demand_order(order_a.clone(), QueuePushDirection::Back) - .expect("Invalid paraid or queue full"); - - OnDemandAssigner::add_on_demand_order(order_a.clone(), QueuePushDirection::Back) - .expect("Invalid paraid or queue full"); - - OnDemandAssigner::add_on_demand_order(order_b.clone(), QueuePushDirection::Back) - .expect("Invalid paraid or queue full"); - - assert_eq!(OnDemandAssigner::queue_size(), 3); + // Place orders for all chains. + parachains.iter().for_each(|chain| { + place_order(*chain); + }); + + // There are 4 entries in free_entries. + let start_free_entries = OnDemandAssigner::get_free_entries().len(); + assert_eq!(start_free_entries, 4); + + // Pop assignments on all cores. + core_indices.iter().enumerate().for_each(|(n, core_index)| { + // There is no affinity on the core prior to popping. + assert!(OnDemandAssigner::get_affinity_entries(*core_index).is_empty()); + + // There's always an order to be popped for each core. + let free_entries = OnDemandAssigner::get_free_entries(); + let next_order = free_entries.peek(); + + // There is no affinity on the paraid prior to popping. + assert!(OnDemandAssigner::get_affinity_map(next_order.unwrap().para_id).is_none()); + + match OnDemandAssigner::pop_assignment_for_core(*core_index) { + Some(assignment) => { + // The popped assignment came from free entries. + assert_eq!( + start_free_entries - 1 - n, + OnDemandAssigner::get_free_entries().len() + ); + // The popped assignment has the same para id as the next order. + assert_eq!(assignment.para_id(), next_order.unwrap().para_id); + }, + None => panic!("Should not happen"), + } + }); - // Approximate having 1 core. - for _ in 0..3 { - OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)); - } + // All entries have been removed from free_entries. + assert!(OnDemandAssigner::get_free_entries().is_empty()); - // Affinity on one core is meaningless. - assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 2); - assert_eq!(OnDemandAssigner::get_affinity_map(para_b).unwrap().count, 1); - assert_eq!( - OnDemandAssigner::get_affinity_map(para_a).unwrap().core_idx, - OnDemandAssigner::get_affinity_map(para_b).unwrap().core_idx - ); - - // Clear affinity - OnDemandAssigner::report_processed(para_a, 0.into()); - OnDemandAssigner::report_processed(para_a, 0.into()); - OnDemandAssigner::report_processed(para_b, 0.into()); - - // Add 2 assignments for para_a for every para_b. - OnDemandAssigner::add_on_demand_order(order_a.clone(), QueuePushDirection::Back) - .expect("Invalid paraid or queue full"); + // All chains have an affinity count of 1. + parachains.iter().for_each(|chain| { + assert_eq!(OnDemandAssigner::get_affinity_map(*chain).unwrap().count, 1); + }); + }); +} - OnDemandAssigner::add_on_demand_order(order_a.clone(), QueuePushDirection::Back) - .expect("Invalid paraid or queue full"); +#[test] +#[should_panic] +fn queue_index_ordering_is_unsound_over_max_size() { + // NOTE: Unsoundness proof. If the number goes sufficiently over the max_queue_max_size + // the overflow will cause an opposite comparison to what would be expected. + let max_num = u32::MAX - ON_DEMAND_MAX_QUEUE_MAX_SIZE; + // 0 < some large number. + assert_eq!(QueueIndex(0).cmp(&QueueIndex(max_num + 1)), Ordering::Less); +} - OnDemandAssigner::add_on_demand_order(order_b.clone(), QueuePushDirection::Back) - .expect("Invalid paraid or queue full"); +#[test] +fn queue_index_ordering_works() { + // The largest accepted queue size. 
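+	// NOTE: `QueueIndex` is assumed to compare by wrap-around distance, so the ordering below
+	// is only meaningful while indices stay within `ON_DEMAND_MAX_QUEUE_MAX_SIZE` of each
+	// other; the `should_panic` test above demonstrates the failure mode beyond that window.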
+ let max_num = ON_DEMAND_MAX_QUEUE_MAX_SIZE;
+
+ // 0 == 0
+ assert_eq!(QueueIndex(0).cmp(&QueueIndex(0)), Ordering::Equal);
+ // 0 < 1
+ assert_eq!(QueueIndex(0).cmp(&QueueIndex(1)), Ordering::Less);
+ // 1 > 0
+ assert_eq!(QueueIndex(1).cmp(&QueueIndex(0)), Ordering::Greater);
+ // 0 < max_num
+ assert_eq!(QueueIndex(0).cmp(&QueueIndex(max_num)), Ordering::Less);
+ // 0 < max_num + 1 (the comparison still holds just past the max size)
+ assert_eq!(QueueIndex(0).cmp(&QueueIndex(max_num + 1)), Ordering::Less);
+
+ // Ordering within the bounds of ON_DEMAND_MAX_QUEUE_MAX_SIZE works.
+ let mut v = vec![3, 6, 2, 1, 5, 4];
+ v.sort_by_key(|&num| QueueIndex(num));
+ assert_eq!(v, vec![1, 2, 3, 4, 5, 6]);
+
+ v = vec![max_num, 4, 5, 1, 6];
+ v.sort_by_key(|&num| QueueIndex(num));
+ assert_eq!(v, vec![1, 4, 5, 6, max_num]);
+
+ // Ordering with an element outside of the bounds of the max size also works.
+ v = vec![max_num + 2, 0, 6, 2, 1, 5, 4];
+ v.sort_by_key(|&num| QueueIndex(num));
+ assert_eq!(v, vec![0, 1, 2, 4, 5, 6, max_num + 2]);
+
+ // Numbers way above the max size will overflow.
+ v = vec![u32::MAX - 1, u32::MAX, 6, 2, 1, 5, 4];
+ v.sort_by_key(|&num| QueueIndex(num));
+ assert_eq!(v, vec![u32::MAX - 1, u32::MAX, 1, 2, 4, 5, 6]);
+}

- // Approximate having 3 cores. CoreIndex 2 should be unable to obtain an assignment
- for _ in 0..3 {
- OnDemandAssigner::pop_assignment_for_core(CoreIndex(0));
- OnDemandAssigner::pop_assignment_for_core(CoreIndex(1));
- assert_eq!(None, OnDemandAssigner::pop_assignment_for_core(CoreIndex(2)));
- }
+#[test]
+fn reverse_queue_index_does_reverse() {
+ let mut v = vec![1, 2, 3, 4, 5, 6];

- // Affinity should be the same as before, but on different cores.
- assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 2);
- assert_eq!(OnDemandAssigner::get_affinity_map(para_b).unwrap().count, 1);
- assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().core_idx, CoreIndex(0));
- assert_eq!(OnDemandAssigner::get_affinity_map(para_b).unwrap().core_idx, CoreIndex(1));
+ // Basic reversal of a vector.
+ v.sort_by_key(|&num| ReverseQueueIndex(num));
+ assert_eq!(v, vec![6, 5, 4, 3, 2, 1]);

- // Clear affinity
- OnDemandAssigner::report_processed(para_a, 0.into());
- OnDemandAssigner::report_processed(para_a, 0.into());
- OnDemandAssigner::report_processed(para_b, 1.into());
+ // Example from rust docs on `Reverse`. Should work identically.
+ v.sort_by_key(|&num| (num > 3, ReverseQueueIndex(num)));
+ assert_eq!(v, vec![3, 2, 1, 6, 5, 4]);

- // There should be no affinity after clearing.
- assert!(OnDemandAssigner::get_affinity_map(para_a).is_none());
- assert!(OnDemandAssigner::get_affinity_map(para_b).is_none());
- });
+ let mut v2 = vec![1, 2, u32::MAX];
+ v2.sort_by_key(|&num| ReverseQueueIndex(num));
+ assert_eq!(v2, vec![2, 1, u32::MAX]);
}

 #[test]
-fn on_demand_orders_cannot_be_popped_if_lifecycle_changes() {
- let para_id = ParaId::from(10);
- let core_index = CoreIndex(0);
- let order = EnqueuedOrder::new(para_id);
+fn queue_status_size_fn_works() {
+ // Add orders to the on demand queue, and make sure that they are properly represented
+ // by the QueueStatusType::size fn.
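+	// `size()` has to account for every live order, whether it currently sits in
+	// `FreeEntries` or in one of the per-core `AffinityEntries` heaps.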
+ let parachains = vec![ParaId::from(111), ParaId::from(222), ParaId::from(333)]; + let core_indices = vec![CoreIndex(0), CoreIndex(1)]; new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { - // Register the para_id as a parathread - schedule_blank_para(para_id, ParaKind::Parathread); - - assert!(!Paras::is_parathread(para_id)); - run_to_block(10, |n| if n == 10 { Some(Default::default()) } else { None }); - assert!(Paras::is_parathread(para_id)); + parachains.iter().for_each(|chain| { + schedule_blank_para(*chain, ParaKind::Parathread); + }); - // Add two assignments for a para_id with a valid lifecycle. - assert_ok!(OnDemandAssigner::add_on_demand_order(order.clone(), QueuePushDirection::Back)); - assert_ok!(OnDemandAssigner::add_on_demand_order(order.clone(), QueuePushDirection::Back)); + assert_eq!(OnDemandAssigner::get_queue_status().size(), 0); - // First pop is fine - assert!( - OnDemandAssigner::pop_assignment_for_core(core_index) == - Some(Assignment::Pool { para_id, core_index }) - ); + run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); - // Deregister para - assert_ok!(Paras::schedule_para_cleanup(para_id)); + // Place orders for all chains. + parachains.iter().for_each(|chain| { + // 2 per chain for a total of 6 + place_order(*chain); + place_order(*chain); + }); - // Run to new session and verify that para_id is no longer a valid parathread. - assert!(Paras::is_parathread(para_id)); - run_to_block(20, |n| if n == 20 { Some(Default::default()) } else { None }); - assert!(!Paras::is_parathread(para_id)); + // 6 orders in free entries + assert_eq!(OnDemandAssigner::get_free_entries().len(), 6); + // 6 orders via queue status size + assert_eq!( + OnDemandAssigner::get_free_entries().len(), + OnDemandAssigner::get_queue_status().size() as usize + ); - // Second pop should be None. - OnDemandAssigner::report_processed(para_id, core_index); - assert_eq!(OnDemandAssigner::pop_assignment_for_core(core_index), None); + core_indices.iter().for_each(|core_index| { + OnDemandAssigner::pop_assignment_for_core(*core_index); + }); + + // There should be 2 orders in the scheduler's claimqueue, + // 2 in assorted AffinityMaps and 2 in free. + // ParaId 111 + assert_eq!(OnDemandAssigner::get_affinity_entries(core_indices[0]).len(), 1); + // ParaId 222 + assert_eq!(OnDemandAssigner::get_affinity_entries(core_indices[1]).len(), 1); + // Free entries are from ParaId 333 + assert_eq!(OnDemandAssigner::get_free_entries().len(), 2); + // For a total size of 4. + assert_eq!(OnDemandAssigner::get_queue_status().size(), 4) }); } diff --git a/polkadot/runtime/parachains/src/configuration.rs b/polkadot/runtime/parachains/src/configuration.rs index 364a15215d3..b7635dcd7b2 100644 --- a/polkadot/runtime/parachains/src/configuration.rs +++ b/polkadot/runtime/parachains/src/configuration.rs @@ -29,6 +29,7 @@ use primitives::{ vstaging::{ApprovalVotingParams, NodeFeatures}, AsyncBackingParams, Balance, ExecutorParamError, ExecutorParams, SessionIndex, LEGACY_MIN_BACKING_VOTES, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, + ON_DEMAND_MAX_QUEUE_MAX_SIZE, }; use sp_runtime::{traits::Zero, Perbill}; use sp_std::prelude::*; @@ -312,6 +313,8 @@ pub enum InconsistentError { InconsistentExecutorParams { inner: ExecutorParamError }, /// TTL should be bigger than lookahead LookaheadExceedsTTL, + /// Passed in queue size for on-demand was too large. 
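+	///
+	/// The cap exists because the wrap-around ordering of `QueueIndex` is only sound up to
+	/// `ON_DEMAND_MAX_QUEUE_MAX_SIZE` (see the assigner's queue index tests).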
+ OnDemandQueueSizeTooLarge,
 }
 
 impl<BlockNumber> HostConfiguration<BlockNumber>
@@ -405,6 +408,10 @@ where
 return Err(LookaheadExceedsTTL)
 }
 
+ if self.scheduler_params.on_demand_queue_max_size > ON_DEMAND_MAX_QUEUE_MAX_SIZE {
+ return Err(OnDemandQueueSizeTooLarge)
+ }
+
 Ok(())
 }
 
@@ -630,7 +637,7 @@ pub mod pallet {
 /// Set the number of coretime execution cores.
 ///
- /// Note that this configuration is managed by the coretime chain. Only manually change
+ /// NOTE: This configuration is managed by the coretime chain. Only manually change
 /// this, if you really know what you are doing!
 #[pallet::call_index(6)]
 #[pallet::weight((
@@ -1133,6 +1140,7 @@ pub mod pallet {
 config.scheduler_params.on_demand_queue_max_size = new;
 })
 }
+
 /// Set the on demand (parathreads) fee variability.
 #[pallet::call_index(50)]
 #[pallet::weight((
diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs
index f68870c98ea..a773eeb5cbd 100644
--- a/polkadot/runtime/rococo/src/lib.rs
+++ b/polkadot/runtime/rococo/src/lib.rs
@@ -1659,6 +1659,7 @@ pub mod migrations {
 // This needs to come after the `parachains_configuration` above as we are reading the configuration.
 coretime::migration::MigrateToCoretime,
 parachains_configuration::migration::v12::MigrateToV12<Runtime>,
+ parachains_assigner_on_demand::migration::MigrateV0ToV1<Runtime>,
 
 // permanent
 pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs
index ac0f05301b4..dba9e7904c7 100644
--- a/polkadot/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs
+++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs
@@ -16,10 +16,10 @@
 //! Autogenerated weights for `runtime_parachains::assigner_on_demand`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-08-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-03-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-fljshgub-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024
 
 // Executed Command:
...
// --extrinsic=*
// --wasm-execution=compiled
// --heap-pages=4096
-// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
// --pallet=runtime_parachains::assigner_on_demand
// --chain=rococo-dev
-// --header=./file_header.txt
-// --output=./runtime/rococo/src/weights/
+// --header=./polkadot/file_header.txt
+// --output=./polkadot/runtime/rococo/src/weights/
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -48,44 +48,44 @@ use core::marker::PhantomData;
 
 /// Weight functions for `runtime_parachains::assigner_on_demand`.
pub struct WeightInfo<T>(PhantomData<T>);
impl<T: frame_system::Config> runtime_parachains::assigner_on_demand::WeightInfo for WeightInfo<T> {
- /// Storage: `OnDemandAssignmentProvider::SpotTraffic` (r:1 w:0)
- /// Proof: `OnDemandAssignmentProvider::SpotTraffic` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
- /// Storage: `Paras::ParaLifecycles` (r:1 w:0)
- /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`)
- /// Storage: `OnDemandAssignmentProvider::OnDemandQueue` (r:1 w:1)
- /// Proof: `OnDemandAssignmentProvider::OnDemandQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `OnDemandAssignmentProvider::QueueStatus` (r:1 w:1)
+ /// Proof: `OnDemandAssignmentProvider::QueueStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `OnDemandAssignmentProvider::ParaIdAffinity` (r:1 w:0)
+ /// Proof: `OnDemandAssignmentProvider::ParaIdAffinity` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `OnDemandAssignmentProvider::FreeEntries` (r:1 w:1)
+ /// Proof: `OnDemandAssignmentProvider::FreeEntries` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 /// The range of component `s` is `[1, 9999]`.
 fn place_order_keep_alive(s: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `297 + s * (4 ±0)`
- // Estimated: `3762 + s * (4 ±0)`
- // Minimum execution time: 33_522_000 picoseconds.
- Weight::from_parts(35_436_835, 0)
- .saturating_add(Weight::from_parts(0, 3762))
- // Standard Error: 129
- .saturating_add(Weight::from_parts(14_041, 0).saturating_mul(s.into()))
+ // Measured: `218 + s * (8 ±0)`
+ // Estimated: `3681 + s * (8 ±0)`
+ // Minimum execution time: 21_053_000 picoseconds.
+ Weight::from_parts(17_291_897, 0)
+ .saturating_add(Weight::from_parts(0, 3681))
+ // Standard Error: 104
+ .saturating_add(Weight::from_parts(18_779, 0).saturating_mul(s.into()))
 .saturating_add(T::DbWeight::get().reads(3))
- .saturating_add(T::DbWeight::get().writes(1))
- .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into()))
+ .saturating_add(T::DbWeight::get().writes(2))
+ .saturating_add(Weight::from_parts(0, 8).saturating_mul(s.into()))
 }
- /// Storage: `OnDemandAssignmentProvider::SpotTraffic` (r:1 w:0)
- /// Proof: `OnDemandAssignmentProvider::SpotTraffic` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
- /// Storage: `Paras::ParaLifecycles` (r:1 w:0)
- /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`)
- /// Storage: `OnDemandAssignmentProvider::OnDemandQueue` (r:1 w:1)
- /// Proof: `OnDemandAssignmentProvider::OnDemandQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `OnDemandAssignmentProvider::QueueStatus` (r:1 w:1)
+ /// Proof: `OnDemandAssignmentProvider::QueueStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `OnDemandAssignmentProvider::ParaIdAffinity` (r:1 w:0)
+ /// Proof: `OnDemandAssignmentProvider::ParaIdAffinity` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `OnDemandAssignmentProvider::FreeEntries` (r:1 w:1)
+ /// Proof: `OnDemandAssignmentProvider::FreeEntries` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 /// The range of component `s` is `[1, 9999]`.
 fn place_order_allow_death(s: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `297 + s * (4 ±0)`
- // Estimated: `3762 + s * (4 ±0)`
- // Minimum execution time: 33_488_000 picoseconds.
- Weight::from_parts(34_848_934, 0) - .saturating_add(Weight::from_parts(0, 3762)) - // Standard Error: 143 - .saturating_add(Weight::from_parts(14_215, 0).saturating_mul(s.into())) + // Measured: `218 + s * (8 ±0)` + // Estimated: `3681 + s * (8 ±0)` + // Minimum execution time: 20_843_000 picoseconds. + Weight::from_parts(16_881_986, 0) + .saturating_add(Weight::from_parts(0, 3681)) + // Standard Error: 104 + .saturating_add(Weight::from_parts(18_788, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(Weight::from_parts(0, 8).saturating_mul(s.into())) } } diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_assigner_on_demand.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_assigner_on_demand.rs index ac0f05301b4..acd1834f79e 100644 --- a/polkadot/runtime/westend/src/weights/runtime_parachains_assigner_on_demand.rs +++ b/polkadot/runtime/westend/src/weights/runtime_parachains_assigner_on_demand.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_parachains::assigner_on_demand` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-fljshgub-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot @@ -31,11 +31,11 @@ // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=runtime_parachains::assigner_on_demand -// --chain=rococo-dev -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --chain=westend-dev +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -48,44 +48,44 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::assigner_on_demand`. 
pub struct WeightInfo<T>(PhantomData<T>);
impl<T: frame_system::Config> runtime_parachains::assigner_on_demand::WeightInfo for WeightInfo<T> {
- /// Storage: `OnDemandAssignmentProvider::SpotTraffic` (r:1 w:0)
- /// Proof: `OnDemandAssignmentProvider::SpotTraffic` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
- /// Storage: `Paras::ParaLifecycles` (r:1 w:0)
- /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`)
- /// Storage: `OnDemandAssignmentProvider::OnDemandQueue` (r:1 w:1)
- /// Proof: `OnDemandAssignmentProvider::OnDemandQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `OnDemandAssignmentProvider::QueueStatus` (r:1 w:1)
+ /// Proof: `OnDemandAssignmentProvider::QueueStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `OnDemandAssignmentProvider::ParaIdAffinity` (r:1 w:0)
+ /// Proof: `OnDemandAssignmentProvider::ParaIdAffinity` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `OnDemandAssignmentProvider::FreeEntries` (r:1 w:1)
+ /// Proof: `OnDemandAssignmentProvider::FreeEntries` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 /// The range of component `s` is `[1, 9999]`.
 fn place_order_keep_alive(s: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `297 + s * (4 ±0)`
- // Estimated: `3762 + s * (4 ±0)`
- // Minimum execution time: 33_522_000 picoseconds.
- Weight::from_parts(35_436_835, 0)
- .saturating_add(Weight::from_parts(0, 3762))
- // Standard Error: 129
- .saturating_add(Weight::from_parts(14_041, 0).saturating_mul(s.into()))
+ // Measured: `218 + s * (8 ±0)`
+ // Estimated: `3681 + s * (8 ±0)`
+ // Minimum execution time: 21_396_000 picoseconds.
+ Weight::from_parts(20_585_695, 0)
+ .saturating_add(Weight::from_parts(0, 3681))
+ // Standard Error: 127
+ .saturating_add(Weight::from_parts(20_951, 0).saturating_mul(s.into()))
 .saturating_add(T::DbWeight::get().reads(3))
- .saturating_add(T::DbWeight::get().writes(1))
- .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into()))
+ .saturating_add(T::DbWeight::get().writes(2))
+ .saturating_add(Weight::from_parts(0, 8).saturating_mul(s.into()))
 }
- /// Storage: `OnDemandAssignmentProvider::SpotTraffic` (r:1 w:0)
- /// Proof: `OnDemandAssignmentProvider::SpotTraffic` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
- /// Storage: `Paras::ParaLifecycles` (r:1 w:0)
- /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`)
- /// Storage: `OnDemandAssignmentProvider::OnDemandQueue` (r:1 w:1)
- /// Proof: `OnDemandAssignmentProvider::OnDemandQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `OnDemandAssignmentProvider::QueueStatus` (r:1 w:1)
+ /// Proof: `OnDemandAssignmentProvider::QueueStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `OnDemandAssignmentProvider::ParaIdAffinity` (r:1 w:0)
+ /// Proof: `OnDemandAssignmentProvider::ParaIdAffinity` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `OnDemandAssignmentProvider::FreeEntries` (r:1 w:1)
+ /// Proof: `OnDemandAssignmentProvider::FreeEntries` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 /// The range of component `s` is `[1, 9999]`.
 fn place_order_allow_death(s: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `297 + s * (4 ±0)`
- // Estimated: `3762 + s * (4 ±0)`
- // Minimum execution time: 33_488_000 picoseconds.
- Weight::from_parts(34_848_934, 0)
- .saturating_add(Weight::from_parts(0, 3762))
- // Standard Error: 143
- .saturating_add(Weight::from_parts(14_215, 0).saturating_mul(s.into()))
+ // Measured: `218 + s * (8 ±0)`
+ // Estimated: `3681 + s * (8 ±0)`
+ // Minimum execution time: 21_412_000 picoseconds.
+ Weight::from_parts(19_731_554, 0)
+ .saturating_add(Weight::from_parts(0, 3681))
+ // Standard Error: 128
+ .saturating_add(Weight::from_parts(21_055, 0).saturating_mul(s.into()))
 .saturating_add(T::DbWeight::get().reads(3))
- .saturating_add(T::DbWeight::get().writes(1))
- .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into()))
+ .saturating_add(T::DbWeight::get().writes(2))
+ .saturating_add(Weight::from_parts(0, 8).saturating_mul(s.into()))
 }
 }
diff --git a/prdoc/pr_3190.prdoc b/prdoc/pr_3190.prdoc
new file mode 100644
index 00000000000..2f7a89a0b1a
--- /dev/null
+++ b/prdoc/pr_3190.prdoc
@@ -0,0 +1,17 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json

+title: Fix algorithmic complexity of the on-demand scheduler.

+doc:
+  - audience: Runtime Dev
+    description: |
+      Improves on-demand performance by a significant factor. Previously, having many on-demand
+      cores would cause very poor block times, because the full order queue was scanned once per
+      core. The new design also allows the maximum size of the on-demand queue to be increased
+      if needed.
+
+      At the same time, the spot price for on-demand is now checked prior to every order,
+      ensuring that economic backpressure will be applied.

+crates:
+  - name: polkadot-runtime-parachains
-- GitLab

From 1da8a6b88f530b467394a9ead8f3042ebdc50a36 Mon Sep 17 00:00:00 2001
From: s0me0ne-unkn0wn <48632512+s0me0ne-unkn0wn@users.noreply.github.com>
Date: Wed, 20 Mar 2024 20:41:43 +0100
Subject: [PATCH 006/128] Enable PoV reclaim on `rococo-parachain` (#3765)

This PR proposes enabling PoV reclaim on the `rococo-parachain` test chain to
streamline testing and development of high-TPS use cases.
---
 Cargo.lock                                                      | 1 +
 cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml | 2 ++
 cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs | 1 +
 3 files changed, 4 insertions(+)

diff --git a/Cargo.lock b/Cargo.lock
index 5401dc5ecfb..bdbf6ddac26 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -14973,6 +14973,7 @@ dependencies = [
 "cumulus-ping",
 "cumulus-primitives-aura",
 "cumulus-primitives-core",
+ "cumulus-primitives-storage-weight-reclaim",
 "cumulus-primitives-utility",
 "frame-benchmarking",
 "frame-executive",
diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml
index 577ed749167..790f38d94f5 100644
--- a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml
+++ b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml
@@ -56,6 +56,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f
 cumulus-ping = { path = "../../../pallets/ping", default-features = false }
 cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false }
 cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false }
+cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false }
 cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false }
 parachains-common = { path = "../../../common", default-features = false }
 testnet-parachains-constants = { path = "../../constants", default-features = false, features = ["rococo"] }
@@ -75,6 +76,7 @@ std = [
 "cumulus-ping/std",
 "cumulus-primitives-aura/std",
 "cumulus-primitives-core/std",
+ "cumulus-primitives-storage-weight-reclaim/std",
 "cumulus-primitives-utility/std",
 "frame-benchmarking?/std",
 "frame-executive/std",
diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs
index 1b7efa6f400..b3bea4d4e65 100644
--- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs
+++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs
@@ -653,6 +653,7 @@ pub type SignedExtra = (
 frame_system::CheckNonce<Runtime>,
 frame_system::CheckWeight<Runtime>,
 pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+ cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
 );
 
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
-- GitLab

From 93b1abb280cf9d6c64f00b66c3593c2b04d50a9e Mon Sep 17 00:00:00 2001
From: gupnik
Date: Thu, 21 Mar 2024 08:41:51 +0530
Subject: [PATCH 007/128] Migrates Westend to Runtime V2 (#3754)

Step in https://github.com/paritytech/polkadot-sdk/issues/3688
---
 polkadot/runtime/westend/Cargo.toml |   2 +-
 polkadot/runtime/westend/src/lib.rs | 321 +++++++++++++++++-----------
 prdoc/pr_3754.prdoc                 |  13 ++
 3 files changed, 211 insertions(+), 125 deletions(-)
 create mode 100644 prdoc/pr_3754.prdoc

diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml
index 4180828bcfb..fcead1dd0b5 100644
--- a/polkadot/runtime/westend/Cargo.toml
+++ b/polkadot/runtime/westend/Cargo.toml
@@ -45,7 +45,7 @@ sp-npos-elections = { path = "../../../substrate/primitives/npos-elections", def
 
 frame-election-provider-support = { path = "../../../substrate/frame/election-provider-support", default-features = false }
 frame-executive = { path = "../../../substrate/frame/executive", default-features = false }
-frame-support = { path = "../../../substrate/frame/support", default-features = false, features = ["tuples-96"] }
+frame-support = { path = "../../../substrate/frame/support", default-features = false, features = ["experimental", "tuples-96"] }
 frame-system = { path = "../../../substrate/frame/system", default-features = false }
 frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false }
 westend-runtime-constants = { package = "westend-runtime-constants", path = "constants", default-features = false }
diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs
index f1d8841989c..62821cae7e4 100644
--- a/polkadot/runtime/westend/src/lib.rs
+++ b/polkadot/runtime/westend/src/lib.rs
@@ -17,7 +17,7 @@
 //! The Westend runtime. This can be compiled with `#[no_std]`, ready for Wasm.
 
 #![cfg_attr(not(feature = "std"), no_std)]
-// `construct_runtime!` does a lot of recursion and requires us to increase the limit.
+// `#[frame_support::runtime]` does a lot of recursion and requires us to increase the limit.
 #![recursion_limit = "512"]
 
 use authority_discovery_primitives::AuthorityId as AuthorityDiscoveryId;
@@ -27,7 +27,7 @@ use beefy_primitives::{
 };
 use frame_election_provider_support::{bounds::ElectionBoundsBuilder, onchain, SequentialPhragmen};
 use frame_support::{
-	construct_runtime, derive_impl,
+	derive_impl,
 	genesis_builder_helper::{build_config, create_default_config},
 	parameter_types,
 	traits::{
@@ -1414,128 +1414,201 @@ impl pallet_asset_rate::Config for Runtime {
 	type BenchmarkHelper = runtime_common::impls::benchmarks::AssetRateArguments;
 }
 
-construct_runtime! {
-	pub enum Runtime
-	{
-		// Basic stuff; balances is uncallable initially.
-		System: frame_system = 0,
-
-		// Babe must be before session.
-		Babe: pallet_babe = 1,
-
-		Timestamp: pallet_timestamp = 2,
-		Indices: pallet_indices = 3,
-		Balances: pallet_balances = 4,
-		TransactionPayment: pallet_transaction_payment = 26,
-
-		// Consensus support.
-		// Authorship must be before session in order to note author in the correct session and era.
-		Authorship: pallet_authorship = 5,
-		Staking: pallet_staking = 6,
-		Offences: pallet_offences = 7,
-		Historical: session_historical = 27,
-
-		Session: pallet_session = 8,
-		Grandpa: pallet_grandpa = 10,
-		AuthorityDiscovery: pallet_authority_discovery = 12,
-
-		// Utility module.
-		Utility: pallet_utility = 16,
-
-		// Less simple identity module.
- Identity: pallet_identity = 17, - - // Social recovery module. - Recovery: pallet_recovery = 18, - - // Vesting. Usable initially, but removed once all vesting is finished. - Vesting: pallet_vesting = 19, - - // System scheduler. - Scheduler: pallet_scheduler = 20, - - // Preimage registrar. - Preimage: pallet_preimage = 28, - - // Sudo. - Sudo: pallet_sudo = 21, - - // Proxy module. Late addition. - Proxy: pallet_proxy = 22, - - // Multisig module. Late addition. - Multisig: pallet_multisig = 23, - - // Election pallet. Only works with staking, but placed here to maintain indices. - ElectionProviderMultiPhase: pallet_election_provider_multi_phase = 24, - - // Provides a semi-sorted list of nominators for staking. - VoterList: pallet_bags_list:: = 25, - - // Nomination pools for staking. - NominationPools: pallet_nomination_pools = 29, - - // Fast unstake pallet: extension to staking. - FastUnstake: pallet_fast_unstake = 30, - - // OpenGov - ConvictionVoting: pallet_conviction_voting = 31, - Referenda: pallet_referenda = 32, - Origins: pallet_custom_origins = 35, - Whitelist: pallet_whitelist = 36, - - // Treasury - Treasury: pallet_treasury = 37, - - // Parachains pallets. Start indices at 40 to leave room. - ParachainsOrigin: parachains_origin = 41, - Configuration: parachains_configuration = 42, - ParasShared: parachains_shared = 43, - ParaInclusion: parachains_inclusion = 44, - ParaInherent: parachains_paras_inherent = 45, - ParaScheduler: parachains_scheduler = 46, - Paras: parachains_paras = 47, - Initializer: parachains_initializer = 48, - Dmp: parachains_dmp = 49, - // RIP Ump 50 - Hrmp: parachains_hrmp = 51, - ParaSessionInfo: parachains_session_info = 52, - ParasDisputes: parachains_disputes = 53, - ParasSlashing: parachains_slashing = 54, - OnDemandAssignmentProvider: parachains_assigner_on_demand = 56, - CoretimeAssignmentProvider: parachains_assigner_coretime = 57, - - // Parachain Onboarding Pallets. Start indices at 60 to leave room. - Registrar: paras_registrar = 60, - Slots: slots = 61, - ParasSudoWrapper: paras_sudo_wrapper = 62, - Auctions: auctions = 63, - Crowdloan: crowdloan = 64, - AssignedSlots: assigned_slots = 65, - Coretime: coretime = 66, - - // Pallet for sending XCM. - XcmPallet: pallet_xcm = 99, - - // Generalized message queue - MessageQueue: pallet_message_queue = 100, - - // Asset rate. - AssetRate: pallet_asset_rate = 101, - - // Root testing pallet. - RootTesting: pallet_root_testing = 102, - - // BEEFY Bridges support. - Beefy: pallet_beefy = 200, - // MMR leaf construction must be after session in order to have a leaf's next_auth_set - // refer to block. See issue polkadot-fellows/runtimes#160 for details. - Mmr: pallet_mmr = 201, - BeefyMmrLeaf: pallet_beefy_mmr = 202, - - // Pallet for migrating Identity to a parachain. To be removed post-migration. - IdentityMigrator: identity_migrator = 248, - } +#[frame_support::runtime(legacy_ordering)] +mod runtime { + #[runtime::runtime] + #[runtime::derive( + RuntimeCall, + RuntimeEvent, + RuntimeError, + RuntimeOrigin, + RuntimeFreezeReason, + RuntimeHoldReason, + RuntimeSlashReason, + RuntimeLockId, + RuntimeTask + )] + pub struct Runtime; + + // Basic stuff; balances is uncallable initially. + #[runtime::pallet_index(0)] + pub type System = frame_system; + + // Babe must be before session. 
+ #[runtime::pallet_index(1)] + pub type Babe = pallet_babe; + + #[runtime::pallet_index(2)] + pub type Timestamp = pallet_timestamp; + #[runtime::pallet_index(3)] + pub type Indices = pallet_indices; + #[runtime::pallet_index(4)] + pub type Balances = pallet_balances; + #[runtime::pallet_index(26)] + pub type TransactionPayment = pallet_transaction_payment; + + // Consensus support. + // Authorship must be before session in order to note author in the correct session and era. + #[runtime::pallet_index(5)] + pub type Authorship = pallet_authorship; + #[runtime::pallet_index(6)] + pub type Staking = pallet_staking; + #[runtime::pallet_index(7)] + pub type Offences = pallet_offences; + #[runtime::pallet_index(27)] + pub type Historical = session_historical; + + #[runtime::pallet_index(8)] + pub type Session = pallet_session; + #[runtime::pallet_index(10)] + pub type Grandpa = pallet_grandpa; + #[runtime::pallet_index(12)] + pub type AuthorityDiscovery = pallet_authority_discovery; + + // Utility module. + #[runtime::pallet_index(16)] + pub type Utility = pallet_utility; + + // Less simple identity module. + #[runtime::pallet_index(17)] + pub type Identity = pallet_identity; + + // Social recovery module. + #[runtime::pallet_index(18)] + pub type Recovery = pallet_recovery; + + // Vesting. Usable initially, but removed once all vesting is finished. + #[runtime::pallet_index(19)] + pub type Vesting = pallet_vesting; + + // System scheduler. + #[runtime::pallet_index(20)] + pub type Scheduler = pallet_scheduler; + + // Preimage registrar. + #[runtime::pallet_index(28)] + pub type Preimage = pallet_preimage; + + // Sudo. + #[runtime::pallet_index(21)] + pub type Sudo = pallet_sudo; + + // Proxy module. Late addition. + #[runtime::pallet_index(22)] + pub type Proxy = pallet_proxy; + + // Multisig module. Late addition. + #[runtime::pallet_index(23)] + pub type Multisig = pallet_multisig; + + // Election pallet. Only works with staking, but placed here to maintain indices. + #[runtime::pallet_index(24)] + pub type ElectionProviderMultiPhase = pallet_election_provider_multi_phase; + + // Provides a semi-sorted list of nominators for staking. + #[runtime::pallet_index(25)] + pub type VoterList = pallet_bags_list; + + // Nomination pools for staking. + #[runtime::pallet_index(29)] + pub type NominationPools = pallet_nomination_pools; + + // Fast unstake pallet = extension to staking. + #[runtime::pallet_index(30)] + pub type FastUnstake = pallet_fast_unstake; + + // OpenGov + #[runtime::pallet_index(31)] + pub type ConvictionVoting = pallet_conviction_voting; + #[runtime::pallet_index(32)] + pub type Referenda = pallet_referenda; + #[runtime::pallet_index(35)] + pub type Origins = pallet_custom_origins; + #[runtime::pallet_index(36)] + pub type Whitelist = pallet_whitelist; + + // Treasury + #[runtime::pallet_index(37)] + pub type Treasury = pallet_treasury; + + // Parachains pallets. Start indices at 40 to leave room. 
+ #[runtime::pallet_index(41)] + pub type ParachainsOrigin = parachains_origin; + #[runtime::pallet_index(42)] + pub type Configuration = parachains_configuration; + #[runtime::pallet_index(43)] + pub type ParasShared = parachains_shared; + #[runtime::pallet_index(44)] + pub type ParaInclusion = parachains_inclusion; + #[runtime::pallet_index(45)] + pub type ParaInherent = parachains_paras_inherent; + #[runtime::pallet_index(46)] + pub type ParaScheduler = parachains_scheduler; + #[runtime::pallet_index(47)] + pub type Paras = parachains_paras; + #[runtime::pallet_index(48)] + pub type Initializer = parachains_initializer; + #[runtime::pallet_index(49)] + pub type Dmp = parachains_dmp; + // RIP Ump 50 + #[runtime::pallet_index(51)] + pub type Hrmp = parachains_hrmp; + #[runtime::pallet_index(52)] + pub type ParaSessionInfo = parachains_session_info; + #[runtime::pallet_index(53)] + pub type ParasDisputes = parachains_disputes; + #[runtime::pallet_index(54)] + pub type ParasSlashing = parachains_slashing; + #[runtime::pallet_index(56)] + pub type OnDemandAssignmentProvider = parachains_assigner_on_demand; + #[runtime::pallet_index(57)] + pub type CoretimeAssignmentProvider = parachains_assigner_coretime; + + // Parachain Onboarding Pallets. Start indices at 60 to leave room. + #[runtime::pallet_index(60)] + pub type Registrar = paras_registrar; + #[runtime::pallet_index(61)] + pub type Slots = slots; + #[runtime::pallet_index(62)] + pub type ParasSudoWrapper = paras_sudo_wrapper; + #[runtime::pallet_index(63)] + pub type Auctions = auctions; + #[runtime::pallet_index(64)] + pub type Crowdloan = crowdloan; + #[runtime::pallet_index(65)] + pub type AssignedSlots = assigned_slots; + #[runtime::pallet_index(66)] + pub type Coretime = coretime; + + // Pallet for sending XCM. + #[runtime::pallet_index(99)] + pub type XcmPallet = pallet_xcm; + + // Generalized message queue + #[runtime::pallet_index(100)] + pub type MessageQueue = pallet_message_queue; + + // Asset rate. + #[runtime::pallet_index(101)] + pub type AssetRate = pallet_asset_rate; + + // Root testing pallet. + #[runtime::pallet_index(102)] + pub type RootTesting = pallet_root_testing; + + // BEEFY Bridges support. + #[runtime::pallet_index(200)] + pub type Beefy = pallet_beefy; + // MMR leaf construction must be after session in order to have a leaf's next_auth_set + // refer to block. See issue polkadot-fellows/runtimes#160 for details. + #[runtime::pallet_index(201)] + pub type Mmr = pallet_mmr; + #[runtime::pallet_index(202)] + pub type BeefyMmrLeaf = pallet_beefy_mmr; + + // Pallet for migrating Identity to a parachain. To be removed post-migration. + #[runtime::pallet_index(248)] + pub type IdentityMigrator = identity_migrator; } /// The address format for describing accounts. 
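As a reading aid, the migration above boils down to the following pattern. This is a minimal sketch with two illustrative pallets rather than Westend's full list, and it will not compile on its own without the usual `Config` implementations for each pallet:

// Sketch only: `legacy_ordering` keeps the ordering semantics of the old
// `construct_runtime!` macro, and each explicit `pallet_index` pins the
// index a pallet already had, so call/event/error encodings stay unchanged.
#[frame_support::runtime(legacy_ordering)]
mod runtime {
	#[runtime::runtime]
	#[runtime::derive(RuntimeCall, RuntimeEvent, RuntimeError, RuntimeOrigin)]
	pub struct Runtime;

	// Was `System: frame_system = 0` under `construct_runtime!`.
	#[runtime::pallet_index(0)]
	pub type System = frame_system;

	// Was `Balances: pallet_balances = 4`.
	#[runtime::pallet_index(4)]
	pub type Balances = pallet_balances;
}

This is also why the Cargo.toml hunk at the top of this patch adds the `experimental` feature to `frame-support`: at the time of this patch the `#[frame_support::runtime]` attribute macro was still gated behind that feature.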
diff --git a/prdoc/pr_3754.prdoc b/prdoc/pr_3754.prdoc new file mode 100644 index 00000000000..94ea6d56608 --- /dev/null +++ b/prdoc/pr_3754.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Migrates Westend to Runtime V2 + +doc: + - audience: Runtime Dev + description: | + This PR migrates Westend from `construct_runtime` to Runtime V2 + as introduced in https://github.com/paritytech/polkadot-sdk/pull/1378 + +crates: + - name: westend-runtime -- GitLab From 7b6b061e32f65b92574deaf8c199855e2c59d4c6 Mon Sep 17 00:00:00 2001 From: Egor_P Date: Thu, 21 Mar 2024 10:00:10 +0100 Subject: [PATCH 008/128] [Backport] version bumps and prdocs reordering 1.9.0 (#3758) This PR backports: - node version bump - `spec_version` bump - reordering of the `prdocs` to the appropriate folder from the `1.9.0` release branch --- .../parachains/runtimes/assets/asset-hub-rococo/src/lib.rs | 4 ++-- .../parachains/runtimes/assets/asset-hub-westend/src/lib.rs | 2 +- .../runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs | 2 +- .../runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs | 2 +- .../runtimes/collectives/collectives-westend/src/lib.rs | 2 +- .../parachains/runtimes/contracts/contracts-rococo/src/lib.rs | 2 +- .../parachains/runtimes/coretime/coretime-rococo/src/lib.rs | 2 +- .../parachains/runtimes/coretime/coretime-westend/src/lib.rs | 2 +- .../parachains/runtimes/glutton/glutton-westend/src/lib.rs | 2 +- cumulus/parachains/runtimes/people/people-rococo/src/lib.rs | 2 +- cumulus/parachains/runtimes/people/people-westend/src/lib.rs | 2 +- .../parachains/runtimes/testing/rococo-parachain/src/lib.rs | 2 +- polkadot/node/primitives/src/lib.rs | 2 +- polkadot/runtime/rococo/src/lib.rs | 2 +- polkadot/runtime/westend/src/lib.rs | 2 +- prdoc/{ => 1.9.0}/pr_1378.prdoc | 0 prdoc/{ => 1.9.0}/pr_1554.prdoc | 0 prdoc/{ => 1.9.0}/pr_1781.prdoc | 0 prdoc/{ => 1.9.0}/pr_2393.prdoc | 0 prdoc/{ => 1.9.0}/pr_3002.prdoc | 0 prdoc/{ => 1.9.0}/pr_3187.prdoc | 0 prdoc/{ => 1.9.0}/pr_3231.prdoc | 0 prdoc/{ => 1.9.0}/pr_3233.prdoc | 0 prdoc/{ => 1.9.0}/pr_3324.prdoc | 0 prdoc/{ => 1.9.0}/pr_3371.prdoc | 0 prdoc/{ => 1.9.0}/pr_3377.prdoc | 0 prdoc/{ => 1.9.0}/pr_3378.prdoc | 0 prdoc/{ => 1.9.0}/pr_3403.prdoc | 0 prdoc/{ => 1.9.0}/pr_3411.prdoc | 0 prdoc/{ => 1.9.0}/pr_3412.prdoc | 0 prdoc/{ => 1.9.0}/pr_3447.prdoc | 0 prdoc/{ => 1.9.0}/pr_3453.prdoc | 0 prdoc/{ => 1.9.0}/pr_3454.prdoc | 0 prdoc/{ => 1.9.0}/pr_3456.prdoc | 0 prdoc/{ => 1.9.0}/pr_3460.prdoc | 0 prdoc/{ => 1.9.0}/pr_3491.prdoc | 0 prdoc/{ => 1.9.0}/pr_3504.prdoc | 0 prdoc/{ => 1.9.0}/pr_3505.prdoc | 0 prdoc/{ => 1.9.0}/pr_3510.prdoc | 0 prdoc/{ => 1.9.0}/pr_3513.prdoc | 0 prdoc/{ => 1.9.0}/pr_3523.prdoc | 0 prdoc/{ => 1.9.0}/pr_3532.prdoc | 0 prdoc/{ => 1.9.0}/pr_3540.prdoc | 0 prdoc/{ => 1.9.0}/pr_3574.prdoc | 0 prdoc/{ => 1.9.0}/pr_3589.prdoc | 0 prdoc/{ => 1.9.0}/pr_3606.prdoc | 0 prdoc/{ => 1.9.0}/pr_3636.prdoc | 0 prdoc/{ => 1.9.0}/pr_3639.prdoc | 0 prdoc/{ => 1.9.0}/pr_3643.prdoc | 0 prdoc/{ => 1.9.0}/pr_3665.prdoc | 0 50 files changed, 16 insertions(+), 16 deletions(-) rename prdoc/{ => 1.9.0}/pr_1378.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_1554.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_1781.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_2393.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3002.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3187.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3231.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3233.prdoc (100%)
rename prdoc/{ => 1.9.0}/pr_3324.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3371.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3377.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3378.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3403.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3411.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3412.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3447.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3453.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3454.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3456.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3460.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3491.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3504.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3505.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3510.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3513.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3523.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3532.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3540.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3574.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3589.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3606.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3636.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3639.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3643.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3665.prdoc (100%) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index d3d8a495ea3..689d8d56c48 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -113,7 +113,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("statemine"), impl_name: create_runtime_str!("statemine"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 14, @@ -126,7 +126,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("statemine"), impl_name: create_runtime_str!("statemine"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 14, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 711fadff92a..48106b5f302 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -110,7 +110,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westmint"), impl_name: create_runtime_str!("westmint"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 14, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index bbafcd3c7dd..bf7483179f2 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -202,7 +202,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("bridge-hub-rococo"), impl_name: create_runtime_str!("bridge-hub-rococo"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 4, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs 
b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 11ab9aecc61..9bdea6b9a7d 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -176,7 +176,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("bridge-hub-westend"), impl_name: create_runtime_str!("bridge-hub-westend"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 4, diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 55269283e08..d3f588bf25f 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -117,7 +117,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("collectives-westend"), impl_name: create_runtime_str!("collectives-westend"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 5, diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index 17bb45f6b1b..e1586c7d9b2 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -133,7 +133,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("contracts-rococo"), impl_name: create_runtime_str!("contracts-rococo"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index 26add2ee942..86eb5cdfcaf 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -133,7 +133,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("coretime-rococo"), impl_name: create_runtime_str!("coretime-rococo"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 0, diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 76732c3fccc..c31e474cc2f 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -133,7 +133,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("coretime-westend"), impl_name: create_runtime_str!("coretime-westend"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 0, diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index fe18f48fbb3..cee17cdc7b0 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -100,7 +100,7 @@ pub const 
VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("glutton-westend"), impl_name: create_runtime_str!("glutton-westend"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index 2bb5641b4af..ad369583f07 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -126,7 +126,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("people-rococo"), impl_name: create_runtime_str!("people-rococo"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 0, diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index b81f7a9c969..c76611ad2a3 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -126,7 +126,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("people-westend"), impl_name: create_runtime_str!("people-westend"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 0, diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index b3bea4d4e65..c6006141981 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -107,7 +107,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("test-parachain"), impl_name: create_runtime_str!("test-parachain"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, diff --git a/polkadot/node/primitives/src/lib.rs b/polkadot/node/primitives/src/lib.rs index d295c21cce1..b6556e0be56 100644 --- a/polkadot/node/primitives/src/lib.rs +++ b/polkadot/node/primitives/src/lib.rs @@ -58,7 +58,7 @@ pub use disputes::{ /// relatively rare. /// /// The associated worker binaries should use the same version as the node that spawns them. 
-pub const NODE_VERSION: &'static str = "1.8.0"; +pub const NODE_VERSION: &'static str = "1.9.0"; // For a 16-ary Merkle Prefix Trie, we can expect at most 16 32-byte hashes per node // plus some overhead: diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index a773eeb5cbd..c9f5d81d286 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -149,7 +149,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("rococo"), impl_name: create_runtime_str!("parity-rococo-v2.0"), authoring_version: 0, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 24, diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 62821cae7e4..83d47508c7c 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -150,7 +150,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westend"), impl_name: create_runtime_str!("parity-westend"), authoring_version: 2, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 24, diff --git a/prdoc/pr_1378.prdoc b/prdoc/1.9.0/pr_1378.prdoc similarity index 100% rename from prdoc/pr_1378.prdoc rename to prdoc/1.9.0/pr_1378.prdoc diff --git a/prdoc/pr_1554.prdoc b/prdoc/1.9.0/pr_1554.prdoc similarity index 100% rename from prdoc/pr_1554.prdoc rename to prdoc/1.9.0/pr_1554.prdoc diff --git a/prdoc/pr_1781.prdoc b/prdoc/1.9.0/pr_1781.prdoc similarity index 100% rename from prdoc/pr_1781.prdoc rename to prdoc/1.9.0/pr_1781.prdoc diff --git a/prdoc/pr_2393.prdoc b/prdoc/1.9.0/pr_2393.prdoc similarity index 100% rename from prdoc/pr_2393.prdoc rename to prdoc/1.9.0/pr_2393.prdoc diff --git a/prdoc/pr_3002.prdoc b/prdoc/1.9.0/pr_3002.prdoc similarity index 100% rename from prdoc/pr_3002.prdoc rename to prdoc/1.9.0/pr_3002.prdoc diff --git a/prdoc/pr_3187.prdoc b/prdoc/1.9.0/pr_3187.prdoc similarity index 100% rename from prdoc/pr_3187.prdoc rename to prdoc/1.9.0/pr_3187.prdoc diff --git a/prdoc/pr_3231.prdoc b/prdoc/1.9.0/pr_3231.prdoc similarity index 100% rename from prdoc/pr_3231.prdoc rename to prdoc/1.9.0/pr_3231.prdoc diff --git a/prdoc/pr_3233.prdoc b/prdoc/1.9.0/pr_3233.prdoc similarity index 100% rename from prdoc/pr_3233.prdoc rename to prdoc/1.9.0/pr_3233.prdoc diff --git a/prdoc/pr_3324.prdoc b/prdoc/1.9.0/pr_3324.prdoc similarity index 100% rename from prdoc/pr_3324.prdoc rename to prdoc/1.9.0/pr_3324.prdoc diff --git a/prdoc/pr_3371.prdoc b/prdoc/1.9.0/pr_3371.prdoc similarity index 100% rename from prdoc/pr_3371.prdoc rename to prdoc/1.9.0/pr_3371.prdoc diff --git a/prdoc/pr_3377.prdoc b/prdoc/1.9.0/pr_3377.prdoc similarity index 100% rename from prdoc/pr_3377.prdoc rename to prdoc/1.9.0/pr_3377.prdoc diff --git a/prdoc/pr_3378.prdoc b/prdoc/1.9.0/pr_3378.prdoc similarity index 100% rename from prdoc/pr_3378.prdoc rename to prdoc/1.9.0/pr_3378.prdoc diff --git a/prdoc/pr_3403.prdoc b/prdoc/1.9.0/pr_3403.prdoc similarity index 100% rename from prdoc/pr_3403.prdoc rename to prdoc/1.9.0/pr_3403.prdoc diff --git a/prdoc/pr_3411.prdoc b/prdoc/1.9.0/pr_3411.prdoc similarity index 100% rename from prdoc/pr_3411.prdoc rename to prdoc/1.9.0/pr_3411.prdoc diff --git a/prdoc/pr_3412.prdoc b/prdoc/1.9.0/pr_3412.prdoc similarity index 100% rename from prdoc/pr_3412.prdoc rename to prdoc/1.9.0/pr_3412.prdoc diff --git a/prdoc/pr_3447.prdoc 
b/prdoc/1.9.0/pr_3447.prdoc similarity index 100% rename from prdoc/pr_3447.prdoc rename to prdoc/1.9.0/pr_3447.prdoc diff --git a/prdoc/pr_3453.prdoc b/prdoc/1.9.0/pr_3453.prdoc similarity index 100% rename from prdoc/pr_3453.prdoc rename to prdoc/1.9.0/pr_3453.prdoc diff --git a/prdoc/pr_3454.prdoc b/prdoc/1.9.0/pr_3454.prdoc similarity index 100% rename from prdoc/pr_3454.prdoc rename to prdoc/1.9.0/pr_3454.prdoc diff --git a/prdoc/pr_3456.prdoc b/prdoc/1.9.0/pr_3456.prdoc similarity index 100% rename from prdoc/pr_3456.prdoc rename to prdoc/1.9.0/pr_3456.prdoc diff --git a/prdoc/pr_3460.prdoc b/prdoc/1.9.0/pr_3460.prdoc similarity index 100% rename from prdoc/pr_3460.prdoc rename to prdoc/1.9.0/pr_3460.prdoc diff --git a/prdoc/pr_3491.prdoc b/prdoc/1.9.0/pr_3491.prdoc similarity index 100% rename from prdoc/pr_3491.prdoc rename to prdoc/1.9.0/pr_3491.prdoc diff --git a/prdoc/pr_3504.prdoc b/prdoc/1.9.0/pr_3504.prdoc similarity index 100% rename from prdoc/pr_3504.prdoc rename to prdoc/1.9.0/pr_3504.prdoc diff --git a/prdoc/pr_3505.prdoc b/prdoc/1.9.0/pr_3505.prdoc similarity index 100% rename from prdoc/pr_3505.prdoc rename to prdoc/1.9.0/pr_3505.prdoc diff --git a/prdoc/pr_3510.prdoc b/prdoc/1.9.0/pr_3510.prdoc similarity index 100% rename from prdoc/pr_3510.prdoc rename to prdoc/1.9.0/pr_3510.prdoc diff --git a/prdoc/pr_3513.prdoc b/prdoc/1.9.0/pr_3513.prdoc similarity index 100% rename from prdoc/pr_3513.prdoc rename to prdoc/1.9.0/pr_3513.prdoc diff --git a/prdoc/pr_3523.prdoc b/prdoc/1.9.0/pr_3523.prdoc similarity index 100% rename from prdoc/pr_3523.prdoc rename to prdoc/1.9.0/pr_3523.prdoc diff --git a/prdoc/pr_3532.prdoc b/prdoc/1.9.0/pr_3532.prdoc similarity index 100% rename from prdoc/pr_3532.prdoc rename to prdoc/1.9.0/pr_3532.prdoc diff --git a/prdoc/pr_3540.prdoc b/prdoc/1.9.0/pr_3540.prdoc similarity index 100% rename from prdoc/pr_3540.prdoc rename to prdoc/1.9.0/pr_3540.prdoc diff --git a/prdoc/pr_3574.prdoc b/prdoc/1.9.0/pr_3574.prdoc similarity index 100% rename from prdoc/pr_3574.prdoc rename to prdoc/1.9.0/pr_3574.prdoc diff --git a/prdoc/pr_3589.prdoc b/prdoc/1.9.0/pr_3589.prdoc similarity index 100% rename from prdoc/pr_3589.prdoc rename to prdoc/1.9.0/pr_3589.prdoc diff --git a/prdoc/pr_3606.prdoc b/prdoc/1.9.0/pr_3606.prdoc similarity index 100% rename from prdoc/pr_3606.prdoc rename to prdoc/1.9.0/pr_3606.prdoc diff --git a/prdoc/pr_3636.prdoc b/prdoc/1.9.0/pr_3636.prdoc similarity index 100% rename from prdoc/pr_3636.prdoc rename to prdoc/1.9.0/pr_3636.prdoc diff --git a/prdoc/pr_3639.prdoc b/prdoc/1.9.0/pr_3639.prdoc similarity index 100% rename from prdoc/pr_3639.prdoc rename to prdoc/1.9.0/pr_3639.prdoc diff --git a/prdoc/pr_3643.prdoc b/prdoc/1.9.0/pr_3643.prdoc similarity index 100% rename from prdoc/pr_3643.prdoc rename to prdoc/1.9.0/pr_3643.prdoc diff --git a/prdoc/pr_3665.prdoc b/prdoc/1.9.0/pr_3665.prdoc similarity index 100% rename from prdoc/pr_3665.prdoc rename to prdoc/1.9.0/pr_3665.prdoc -- GitLab From 75074952a859f90213ea25257b71ec2189dbcfc1 Mon Sep 17 00:00:00 2001 From: Egor_P Date: Thu, 21 Mar 2024 10:00:40 +0100 Subject: [PATCH 009/128] [Backport] Reformat release notes generation (#3759) This PR backports small reformatting of the release notes templates. 
--- scripts/release/build-changelogs.sh | 1 + scripts/release/templates/audience.md.tera | 6 ++---- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/scripts/release/build-changelogs.sh b/scripts/release/build-changelogs.sh index a9275f45a50..cbfb7ad0e48 100755 --- a/scripts/release/build-changelogs.sh +++ b/scripts/release/build-changelogs.sh @@ -48,6 +48,7 @@ for audience in "${audiences[@]}"; do echo "Processing audience: $audience ($audience_id)" export TARGET_AUDIENCE=$audience tera -t "${TEMPLATE_AUDIENCE}" --env --env-key env "${CONTEXT_JSON}" > "$OUTPUT/relnote_${audience_id}.md" + cat "$OUTPUT/relnote_${audience_id}.md" >> "$OUTPUT/relnote_combined.md" done # Show the files diff --git a/scripts/release/templates/audience.md.tera b/scripts/release/templates/audience.md.tera index dc507053dd5..0b47850e3a3 100644 --- a/scripts/release/templates/audience.md.tera +++ b/scripts/release/templates/audience.md.tera @@ -1,11 +1,9 @@ -## Release {{ env.PRODUCT }} {{ env.VERSION }} - -Changelog for `{{ env.TARGET_AUDIENCE }}`. +### Changelog for `{{ env.TARGET_AUDIENCE }}` {% for file in prdoc -%} -#### PR #{{file.doc_filename.number}}: {{ file.content.title }} {% for doc_item in file.content.doc %} {%- if doc_item.audience == env.TARGET_AUDIENCE %} +#### [#{{file.doc_filename.number}}]: {{ file.content.title }} {{ doc_item.description }} {% endif -%} -- GitLab From 4842faf65d3628586d304fbcb6cb19b17b4a629c Mon Sep 17 00:00:00 2001 From: Alin Dima Date: Thu, 21 Mar 2024 12:10:45 +0200 Subject: [PATCH 010/128] Elastic scaling: runtime dependency tracking and enactment (#3479) Changes needed to implement the runtime part of elastic scaling: https://github.com/paritytech/polkadot-sdk/issues/3131, https://github.com/paritytech/polkadot-sdk/issues/3132, https://github.com/paritytech/polkadot-sdk/issues/3202 Also fixes https://github.com/paritytech/polkadot-sdk/issues/3675 TODOs: - [x] storage migration - [x] optimise process_candidates from O(N^2) - [x] drop backable candidates which form cycles - [x] fix unit tests - [x] add more unit tests - [x] check the runtime APIs which use the pending availability storage. We need to expose all of them, see https://github.com/paritytech/polkadot-sdk/issues/3576 - [x] optimise the candidate selection. we're currently picking randomly until we satisfy the weight limit. 
we need to be smart about not breaking candidate chains while being fair to all paras - https://github.com/paritytech/polkadot-sdk/pull/3573 Relies on the changes made in https://github.com/paritytech/polkadot-sdk/pull/3233 in terms of the inclusion policy and the candidate ordering --------- Signed-off-by: alindima Co-authored-by: command-bot <> Co-authored-by: eskimor --- .../src/runtime/inclusion.md | 11 +- .../src/runtime/parainherent.md | 2 +- .../src/runtime/scheduler.md | 1 - polkadot/runtime/parachains/src/builder.rs | 109 +- .../parachains/src/inclusion/migration.rs | 317 ++++ .../runtime/parachains/src/inclusion/mod.rs | 803 ++++----- .../runtime/parachains/src/inclusion/tests.rs | 1241 ++++++++----- .../src/paras_inherent/benchmarking.rs | 8 - .../parachains/src/paras_inherent/mod.rs | 599 +++--- .../parachains/src/paras_inherent/tests.rs | 1604 +++++++++++++++-- .../parachains/src/runtime_api_impl/v7.rs | 84 +- polkadot/runtime/parachains/src/scheduler.rs | 10 - polkadot/runtime/parachains/src/util.rs | 19 +- polkadot/runtime/rococo/src/lib.rs | 2 + .../runtime_parachains_paras_inherent.rs | 421 +++-- polkadot/runtime/westend/src/lib.rs | 1 + .../runtime_parachains_paras_inherent.rs | 583 +++--- prdoc/pr_3479.prdoc | 8 + 18 files changed, 4075 insertions(+), 1748 deletions(-) create mode 100644 polkadot/runtime/parachains/src/inclusion/migration.rs create mode 100644 prdoc/pr_3479.prdoc diff --git a/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md b/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md index f6a32a01d50..fd74f33253b 100644 --- a/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md +++ b/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md @@ -147,15 +147,16 @@ All failed checks should lead to an unrecoverable error making the block invalid // return a vector of cleaned-up core IDs. } ``` -* `force_enact(ParaId)`: Forcibly enact the candidate with the given ID as though it had been deemed available by - bitfields. Is a no-op if there is no candidate pending availability for this para-id. This should generally not be - used but it is useful during execution of Runtime APIs, where the changes to the state are expected to be discarded - directly after. +* `force_enact(ParaId)`: Forcibly enact the pending candidates of the given paraid as though they had been deemed + available by bitfields. Is a no-op if there is no candidate pending availability for this para-id. + If there are multiple candidates pending availability for this para-id, it will enact all of + them. This should generally not be used but it is useful during execution of Runtime APIs, + where the changes to the state are expected to be discarded directly after. * `candidate_pending_availability(ParaId) -> Option`: returns the `CommittedCandidateReceipt` pending availability for the para provided, if any. * `pending_availability(ParaId) -> Option`: returns the metadata around the candidate pending availability for the para, if any. -* `collect_disputed(disputed: Vec) -> Vec`: Sweeps through all paras pending availability. If +* `free_disputed(disputed: Vec) -> Vec`: Sweeps through all paras pending availability. If the candidate hash is one of the disputed candidates, then clean up the corresponding storage for that candidate and the commitments. Return a vector of cleaned-up core IDs. 
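One more reading aid before the code changes below. The "deemed available by bitfields" decision that the guide text above keeps referring to is, at its core, a supermajority count of availability votes per candidate. The sketch below is self-contained and illustrative, not the pallet's actual code: the real pallet builds its threshold on `supermajority_threshold` from the primitives crate (visible in the `inclusion/mod.rs` imports further down) and counts votes in a `BitVec` rather than a slice of bools.

// Illustrative stand-in for the pallet's availability check: a candidate is
// available once more than two thirds of the validators have voted for it.
fn availability_threshold(n_validators: usize) -> usize {
	// Strict >2/3 supermajority: at least floor(2n/3) + 1 votes.
	n_validators * 2 / 3 + 1
}

fn is_available(availability_votes: &[bool]) -> bool {
	let ones = availability_votes.iter().filter(|v| **v).count();
	ones >= availability_threshold(availability_votes.len())
}

fn main() {
	// 5 of 7 validators voted "available"; the threshold for 7 is 5.
	let votes = [true, true, false, true, true, false, true];
	assert!(is_available(&votes));
}

With elastic scaling, meeting the threshold is necessary but no longer sufficient on its own: as the inclusion changes below enforce, a candidate is only enacted once all of its predecessors in the para's pending-availability queue have been enacted as well.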
diff --git a/polkadot/roadmap/implementers-guide/src/runtime/parainherent.md b/polkadot/roadmap/implementers-guide/src/runtime/parainherent.md index 5419ddae83d..1345f0eea95 100644 --- a/polkadot/roadmap/implementers-guide/src/runtime/parainherent.md +++ b/polkadot/roadmap/implementers-guide/src/runtime/parainherent.md @@ -17,7 +17,7 @@ There are a couple of important notes to the operations in this inherent as they this fork. 1. When disputes are initiated, we remove the block from pending availability. This allows us to roll back chains to the block before blocks are included as opposed to backing. It's important to do this before processing bitfields. -1. `Inclusion::collect_disputed` is kind of expensive so it's important to gate this on whether there are actually any +1. `Inclusion::free_disputed` is kind of expensive so it's important to gate this on whether there are actually any new disputes. Which should be never. 1. And we don't accept parablocks that have open disputes or disputes that have concluded against the candidate. It's important to import dispute statements before backing, but this is already the case as disputes are imported before diff --git a/polkadot/roadmap/implementers-guide/src/runtime/scheduler.md b/polkadot/roadmap/implementers-guide/src/runtime/scheduler.md index 32a7fe652db..04b221a83e5 100644 --- a/polkadot/roadmap/implementers-guide/src/runtime/scheduler.md +++ b/polkadot/roadmap/implementers-guide/src/runtime/scheduler.md @@ -285,7 +285,6 @@ No finalization routine runs for this module. - This clears them from `Scheduled` and marks each corresponding `core` in the `AvailabilityCores` as occupied. - Since both the availability cores and the newly-occupied cores lists are sorted ascending, this method can be implemented efficiently. -- `core_para(CoreIndex) -> ParaId`: return the currently-scheduled or occupied ParaId for the given core. - `group_validators(GroupIndex) -> Option>`: return all validators in a given group, if the group index is valid for this session. - `availability_timeout_predicate() -> Option bool>`: returns an optional predicate diff --git a/polkadot/runtime/parachains/src/builder.rs b/polkadot/runtime/parachains/src/builder.rs index 05f6845f3b7..73617010f6d 100644 --- a/polkadot/runtime/parachains/src/builder.rs +++ b/polkadot/runtime/parachains/src/builder.rs @@ -40,7 +40,7 @@ use sp_runtime::{ RuntimeAppPublic, }; use sp_std::{ - collections::{btree_map::BTreeMap, vec_deque::VecDeque}, + collections::{btree_map::BTreeMap, btree_set::BTreeSet, vec_deque::VecDeque}, prelude::Vec, vec, }; @@ -104,6 +104,8 @@ pub(crate) struct BenchBuilder { code_upgrade: Option, /// Specifies whether the claimqueue should be filled. fill_claimqueue: bool, + /// Cores which should not be available when being populated with pending candidates. + unavailable_cores: Vec, _phantom: sp_std::marker::PhantomData, } @@ -133,6 +135,7 @@ impl BenchBuilder { elastic_paras: Default::default(), code_upgrade: None, fill_claimqueue: true, + unavailable_cores: vec![], _phantom: sp_std::marker::PhantomData::, } } @@ -149,6 +152,12 @@ impl BenchBuilder { self } + /// Set the cores which should not be available when being populated with pending candidates. + pub(crate) fn set_unavailable_cores(mut self, unavailable_cores: Vec) -> Self { + self.unavailable_cores = unavailable_cores; + self + } + /// Set a map from para id seed to number of validity votes. 
pub(crate) fn set_backed_and_concluding_paras( mut self, @@ -159,7 +168,6 @@ impl BenchBuilder { } /// Set a map from para id seed to number of cores assigned to it. - #[cfg(feature = "runtime-benchmarks")] pub(crate) fn set_elastic_paras(mut self, elastic_paras: BTreeMap) -> Self { self.elastic_paras = elastic_paras; self @@ -284,11 +292,13 @@ impl BenchBuilder { core_idx: CoreIndex, candidate_hash: CandidateHash, availability_votes: BitVec, + commitments: CandidateCommitments, ) -> inclusion::CandidatePendingAvailability> { inclusion::CandidatePendingAvailability::>::new( core_idx, // core candidate_hash, // hash Self::candidate_descriptor_mock(), // candidate descriptor + commitments, // commitments availability_votes, // availability votes Default::default(), // backers Zero::zero(), // relay parent @@ -309,12 +319,6 @@ impl BenchBuilder { availability_votes: BitVec, candidate_hash: CandidateHash, ) { - let candidate_availability = Self::candidate_availability_mock( - group_idx, - core_idx, - candidate_hash, - availability_votes, - ); let commitments = CandidateCommitments:: { upward_messages: Default::default(), horizontal_messages: Default::default(), @@ -323,16 +327,29 @@ impl BenchBuilder { processed_downward_messages: 0, hrmp_watermark: 0u32.into(), }; - inclusion::PendingAvailability::::insert(para_id, candidate_availability); - inclusion::PendingAvailabilityCommitments::::insert(¶_id, commitments); + let candidate_availability = Self::candidate_availability_mock( + group_idx, + core_idx, + candidate_hash, + availability_votes, + commitments, + ); + inclusion::PendingAvailability::::mutate(para_id, |maybe_andidates| { + if let Some(candidates) = maybe_andidates { + candidates.push_back(candidate_availability); + } else { + *maybe_andidates = + Some([candidate_availability].into_iter().collect::>()); + } + }); } /// Create an `AvailabilityBitfield` where `concluding` is a map where each key is a core index /// that is concluding and `cores` is the total number of cores in the system. - fn availability_bitvec(concluding: &BTreeMap, cores: usize) -> AvailabilityBitfield { + fn availability_bitvec(concluding_cores: &BTreeSet, cores: usize) -> AvailabilityBitfield { let mut bitfields = bitvec::bitvec![u8, bitvec::order::Lsb0; 0; 0]; for i in 0..cores { - if concluding.get(&(i as u32)).is_some() { + if concluding_cores.contains(&(i as u32)) { bitfields.push(true); } else { bitfields.push(false) @@ -356,13 +373,13 @@ impl BenchBuilder { } } - /// Register `cores` count of parachains. + /// Register `n_paras` count of parachains. /// /// Note that this must be called at least 2 sessions before the target session as there is a /// n+2 session delay for the scheduled actions to take effect. - fn setup_para_ids(cores: usize) { + fn setup_para_ids(n_paras: usize) { // make sure parachains exist prior to session change. 
- for i in 0..cores { + for i in 0..n_paras { let para_id = ParaId::from(i as u32); let validation_code = mock_validation_code(); @@ -472,24 +489,8 @@ impl BenchBuilder { let validators = self.validators.as_ref().expect("must have some validators prior to calling"); - let availability_bitvec = Self::availability_bitvec(concluding_paras, total_cores); - - let bitfields: Vec> = validators - .iter() - .enumerate() - .map(|(i, public)| { - let unchecked_signed = UncheckedSigned::::benchmark_sign( - public, - availability_bitvec.clone(), - &self.signing_context(), - ValidatorIndex(i as u32), - ); - - unchecked_signed - }) - .collect(); - let mut current_core_idx = 0u32; + let mut concluding_cores = BTreeSet::new(); for (seed, _) in concluding_paras.iter() { // make sure the candidates that will be concluding are marked as pending availability. @@ -505,13 +506,34 @@ impl BenchBuilder { para_id, core_idx, group_idx, - Self::validator_availability_votes_yes(validators.len()), + // No validators have made this candidate available yet. + bitvec::bitvec![u8, bitvec::order::Lsb0; 0; validators.len()], CandidateHash(H256::from(byte32_slice_from(current_core_idx))), ); + if !self.unavailable_cores.contains(¤t_core_idx) { + concluding_cores.insert(current_core_idx); + } current_core_idx += 1; } } + let availability_bitvec = Self::availability_bitvec(&concluding_cores, total_cores); + + let bitfields: Vec> = validators + .iter() + .enumerate() + .map(|(i, public)| { + let unchecked_signed = UncheckedSigned::::benchmark_sign( + public, + availability_bitvec.clone(), + &self.signing_context(), + ValidatorIndex(i as u32), + ); + + unchecked_signed + }) + .collect(); + bitfields } @@ -522,7 +544,7 @@ impl BenchBuilder { /// validity votes. fn create_backed_candidates( &self, - cores_with_backed_candidates: &BTreeMap, + paras_with_backed_candidates: &BTreeMap, elastic_paras: &BTreeMap, includes_code_upgrade: Option, ) -> Vec> { @@ -531,7 +553,7 @@ impl BenchBuilder { let config = configuration::Pallet::::config(); let mut current_core_idx = 0u32; - cores_with_backed_candidates + paras_with_backed_candidates .iter() .flat_map(|(seed, num_votes)| { assert!(*num_votes <= validators.len() as u32); @@ -760,7 +782,7 @@ impl BenchBuilder { // NOTE: there is an n+2 session delay for these actions to take effect. // We are currently in Session 0, so these changes will take effect in Session 2. - Self::setup_para_ids(used_cores); + Self::setup_para_ids(used_cores - extra_cores); configuration::ActiveConfig::::mutate(|c| { c.scheduler_params.num_cores = used_cores as u32; }); @@ -782,11 +804,11 @@ impl BenchBuilder { let disputes = builder.create_disputes( builder.backed_and_concluding_paras.len() as u32, - used_cores as u32, + (used_cores - extra_cores) as u32, builder.dispute_sessions.as_slice(), ); let mut disputed_cores = (builder.backed_and_concluding_paras.len() as u32.. - used_cores as u32) + ((used_cores - extra_cores) as u32)) .into_iter() .map(|idx| (idx, 0)) .collect::>(); @@ -794,7 +816,7 @@ impl BenchBuilder { let mut all_cores = builder.backed_and_concluding_paras.clone(); all_cores.append(&mut disputed_cores); - assert_eq!(inclusion::PendingAvailability::::iter().count(), used_cores as usize,); + assert_eq!(inclusion::PendingAvailability::::iter().count(), used_cores - extra_cores); // Mark all the used cores as occupied. 
We expect that there are // `backed_and_concluding_paras` that are pending availability and that there are @@ -831,7 +853,7 @@ impl BenchBuilder { .keys() .flat_map(|para_id| { (0..elastic_paras.get(¶_id).cloned().unwrap_or(1)) - .map(|_para_local_core_idx| { + .filter_map(|_para_local_core_idx| { let ttl = configuration::Pallet::::config().scheduler_params.ttl; // Load an assignment into provider so that one is present to pop let assignment = @@ -844,8 +866,13 @@ impl BenchBuilder { CoreIndex(core_idx), [ParasEntry::new(assignment, now + ttl)].into(), ); + let res = if builder.unavailable_cores.contains(&core_idx) { + None + } else { + Some(entry) + }; core_idx += 1; - entry + res }) .collect::>)>>() }) diff --git a/polkadot/runtime/parachains/src/inclusion/migration.rs b/polkadot/runtime/parachains/src/inclusion/migration.rs new file mode 100644 index 00000000000..1e63b209f4e --- /dev/null +++ b/polkadot/runtime/parachains/src/inclusion/migration.rs @@ -0,0 +1,317 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +pub use v1::MigrateToV1; + +pub mod v0 { + use crate::inclusion::{Config, Pallet}; + use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; + use frame_support::{storage_alias, Twox64Concat}; + use frame_system::pallet_prelude::BlockNumberFor; + use parity_scale_codec::{Decode, Encode}; + use primitives::{ + AvailabilityBitfield, CandidateCommitments, CandidateDescriptor, CandidateHash, CoreIndex, + GroupIndex, Id as ParaId, ValidatorIndex, + }; + use scale_info::TypeInfo; + + #[derive(Encode, Decode, PartialEq, TypeInfo, Clone, Debug)] + pub struct CandidatePendingAvailability { + pub core: CoreIndex, + pub hash: CandidateHash, + pub descriptor: CandidateDescriptor, + pub availability_votes: BitVec, + pub backers: BitVec, + pub relay_parent_number: N, + pub backed_in_number: N, + pub backing_group: GroupIndex, + } + + #[derive(Encode, Decode, TypeInfo, Debug, PartialEq)] + pub struct AvailabilityBitfieldRecord { + pub bitfield: AvailabilityBitfield, + pub submitted_at: N, + } + + #[storage_alias] + pub type PendingAvailability = StorageMap< + Pallet, + Twox64Concat, + ParaId, + CandidatePendingAvailability<::Hash, BlockNumberFor>, + >; + + #[storage_alias] + pub type PendingAvailabilityCommitments = + StorageMap, Twox64Concat, ParaId, CandidateCommitments>; + + #[storage_alias] + pub type AvailabilityBitfields = StorageMap< + Pallet, + Twox64Concat, + ValidatorIndex, + AvailabilityBitfieldRecord>, + >; +} + +mod v1 { + use super::v0::{ + AvailabilityBitfields, PendingAvailability as V0PendingAvailability, + PendingAvailabilityCommitments as V0PendingAvailabilityCommitments, + }; + use crate::inclusion::{ + CandidatePendingAvailability as V1CandidatePendingAvailability, Config, Pallet, + PendingAvailability as V1PendingAvailability, + }; + use frame_support::{traits::OnRuntimeUpgrade, weights::Weight}; + use sp_core::Get; + use sp_std::{collections::vec_deque::VecDeque, vec::Vec}; + + #[cfg(feature = "try-runtime")] + use frame_support::{ + ensure, 
+ traits::{GetStorageVersion, StorageVersion}, + }; + #[cfg(feature = "try-runtime")] + use parity_scale_codec::{Decode, Encode}; + + pub struct VersionUncheckedMigrateToV1(sp_std::marker::PhantomData); + + impl OnRuntimeUpgrade for VersionUncheckedMigrateToV1 { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + log::trace!(target: crate::inclusion::LOG_TARGET, "Running pre_upgrade() for inclusion MigrateToV1"); + let candidates_before_upgrade = V0PendingAvailability::::iter().count(); + let commitments_before_upgrade = V0PendingAvailabilityCommitments::::iter().count(); + + if candidates_before_upgrade != commitments_before_upgrade { + log::warn!( + target: crate::inclusion::LOG_TARGET, + "Number of pending candidates differ from the number of pending commitments. {} vs {}", + candidates_before_upgrade, + commitments_before_upgrade + ); + } + + Ok((candidates_before_upgrade as u32).encode()) + } + + fn on_runtime_upgrade() -> Weight { + let mut weight: Weight = Weight::zero(); + + let v0_candidates: Vec<_> = V0PendingAvailability::::drain().collect(); + + for (para_id, candidate) in v0_candidates { + let commitments = V0PendingAvailabilityCommitments::::take(para_id); + // One write for each removal (one candidate and one commitment). + weight = weight.saturating_add(T::DbWeight::get().writes(2)); + + if let Some(commitments) = commitments { + let mut per_para = VecDeque::new(); + per_para.push_back(V1CandidatePendingAvailability { + core: candidate.core, + hash: candidate.hash, + descriptor: candidate.descriptor, + availability_votes: candidate.availability_votes, + backers: candidate.backers, + relay_parent_number: candidate.relay_parent_number, + backed_in_number: candidate.backed_in_number, + backing_group: candidate.backing_group, + commitments, + }); + V1PendingAvailability::::insert(para_id, per_para); + + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + } + } + + // should've already been drained by the above for loop, but as a sanity check, in case + // there are more commitments than candidates. + // V0PendingAvailabilityCommitments should not contain too many keys so removing + // everything at once should be safe + let res = V0PendingAvailabilityCommitments::::clear(u32::MAX, None); + weight = weight.saturating_add( + T::DbWeight::get().reads_writes(res.loops as u64, res.backend as u64), + ); + + // AvailabilityBitfields should not contain too many keys so removing everything at once + // should be safe. + let res = AvailabilityBitfields::::clear(u32::MAX, None); + weight = weight.saturating_add( + T::DbWeight::get().reads_writes(res.loops as u64, res.backend as u64), + ); + + weight + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + log::trace!(target: crate::inclusion::LOG_TARGET, "Running post_upgrade() for inclusion MigrateToV1"); + ensure!( + Pallet::::on_chain_storage_version() >= StorageVersion::new(1), + "Storage version should be >= 1 after the migration" + ); + + let candidates_before_upgrade = + u32::decode(&mut &state[..]).expect("Was properly encoded") as usize; + let candidates_after_upgrade = V1PendingAvailability::::iter().fold( + 0usize, + |mut acc, (_paraid, para_candidates)| { + acc += para_candidates.len(); + acc + }, + ); + + ensure!( + candidates_before_upgrade == candidates_after_upgrade, + "Number of pending candidates should be the same as the one before the upgrade." 
+ ); + ensure!( + V0PendingAvailability::::iter().next() == None, + "Pending availability candidates storage v0 should have been removed" + ); + ensure!( + V0PendingAvailabilityCommitments::::iter().next() == None, + "Pending availability commitments storage should have been removed" + ); + ensure!( + AvailabilityBitfields::::iter().next() == None, + "Availability bitfields storage should have been removed" + ); + + Ok(()) + } + } + + /// Migrate to v1 inclusion module storage. + /// - merges the `PendingAvailabilityCommitments` into the `CandidatePendingAvailability` + /// storage + /// - removes the `AvailabilityBitfields` storage, which was never read. + pub type MigrateToV1 = frame_support::migrations::VersionedMigration< + 0, + 1, + VersionUncheckedMigrateToV1, + Pallet, + ::DbWeight, + >; +} + +#[cfg(test)] +mod tests { + use super::{v1::VersionUncheckedMigrateToV1, *}; + use crate::{ + inclusion::{ + CandidatePendingAvailability as V1CandidatePendingAvailability, + PendingAvailability as V1PendingAvailability, *, + }, + mock::{new_test_ext, MockGenesisConfig, Test}, + }; + use frame_support::traits::OnRuntimeUpgrade; + use primitives::{AvailabilityBitfield, Id as ParaId}; + use test_helpers::{dummy_candidate_commitments, dummy_candidate_descriptor, dummy_hash}; + + #[test] + fn migrate_to_v1() { + new_test_ext(MockGenesisConfig::default()).execute_with(|| { + // No data to migrate. + assert_eq!( + as OnRuntimeUpgrade>::on_runtime_upgrade(), + Weight::zero() + ); + assert!(V1PendingAvailability::::iter().next().is_none()); + + let mut expected = vec![]; + + for i in 1..5 { + let descriptor = dummy_candidate_descriptor(dummy_hash()); + v0::PendingAvailability::::insert( + ParaId::from(i), + v0::CandidatePendingAvailability { + core: CoreIndex(i), + descriptor: descriptor.clone(), + relay_parent_number: i, + hash: CandidateHash(dummy_hash()), + availability_votes: Default::default(), + backed_in_number: i, + backers: Default::default(), + backing_group: GroupIndex(i), + }, + ); + v0::PendingAvailabilityCommitments::::insert( + ParaId::from(i), + dummy_candidate_commitments(HeadData(vec![i as _])), + ); + + v0::AvailabilityBitfields::::insert( + ValidatorIndex(i), + v0::AvailabilityBitfieldRecord { + bitfield: AvailabilityBitfield(Default::default()), + submitted_at: i, + }, + ); + + expected.push(( + ParaId::from(i), + [V1CandidatePendingAvailability { + core: CoreIndex(i), + descriptor, + relay_parent_number: i, + hash: CandidateHash(dummy_hash()), + availability_votes: Default::default(), + backed_in_number: i, + backers: Default::default(), + backing_group: GroupIndex(i), + commitments: dummy_candidate_commitments(HeadData(vec![i as _])), + }] + .into_iter() + .collect::>(), + )); + } + // add some wrong data also, candidates without commitments or commitments without + // candidates. + v0::PendingAvailability::::insert( + ParaId::from(6), + v0::CandidatePendingAvailability { + core: CoreIndex(6), + descriptor: dummy_candidate_descriptor(dummy_hash()), + relay_parent_number: 6, + hash: CandidateHash(dummy_hash()), + availability_votes: Default::default(), + backed_in_number: 6, + backers: Default::default(), + backing_group: GroupIndex(6), + }, + ); + v0::PendingAvailabilityCommitments::::insert( + ParaId::from(7), + dummy_candidate_commitments(HeadData(vec![7 as _])), + ); + + // For tests, db weight is zero. 
+ assert_eq!( + as OnRuntimeUpgrade>::on_runtime_upgrade(), + Weight::zero() + ); + + assert_eq!(v0::PendingAvailabilityCommitments::::iter().next(), None); + assert_eq!(v0::PendingAvailability::::iter().next(), None); + assert_eq!(v0::AvailabilityBitfields::::iter().next(), None); + + let mut actual = V1PendingAvailability::::iter().collect::>(); + actual.sort_by(|(id1, _), (id2, _)| id1.cmp(id2)); + expected.sort_by(|(id1, _), (id2, _)| id1.cmp(id2)); + + assert_eq!(actual, expected); + }); + } +} diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 16e2e93b561..e77f8d15b40 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -23,31 +23,35 @@ use crate::{ configuration::{self, HostConfiguration}, disputes, dmp, hrmp, paras::{self, SetGoAhead}, - scheduler::{self, AvailabilityTimeoutStatus}, + scheduler, shared::{self, AllowedRelayParentsTracker}, + util::make_persisted_validation_data_with_parent, }; use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; use frame_support::{ defensive, pallet_prelude::*, - traits::{Defensive, EnqueueMessage, Footprint, QueueFootprint}, + traits::{EnqueueMessage, Footprint, QueueFootprint}, BoundedSlice, }; use frame_system::pallet_prelude::*; use pallet_message_queue::OnQueueChanged; use parity_scale_codec::{Decode, Encode}; use primitives::{ - effective_minimum_backing_votes, supermajority_threshold, well_known_keys, - AvailabilityBitfield, BackedCandidate, CandidateCommitments, CandidateDescriptor, - CandidateHash, CandidateReceipt, CommittedCandidateReceipt, CoreIndex, GroupIndex, Hash, - HeadData, Id as ParaId, SignedAvailabilityBitfields, SigningContext, UpwardMessage, - ValidatorId, ValidatorIndex, ValidityAttestation, + effective_minimum_backing_votes, supermajority_threshold, well_known_keys, BackedCandidate, + CandidateCommitments, CandidateDescriptor, CandidateHash, CandidateReceipt, + CommittedCandidateReceipt, CoreIndex, GroupIndex, Hash, HeadData, Id as ParaId, + SignedAvailabilityBitfields, SigningContext, UpwardMessage, ValidatorId, ValidatorIndex, + ValidityAttestation, }; use scale_info::TypeInfo; use sp_runtime::{traits::One, DispatchError, SaturatedConversion, Saturating}; #[cfg(feature = "std")] use sp_std::fmt; -use sp_std::{collections::btree_set::BTreeSet, prelude::*}; +use sp_std::{ + collections::{btree_map::BTreeMap, btree_set::BTreeSet, vec_deque::VecDeque}, + prelude::*, +}; pub use pallet::*; @@ -57,6 +61,8 @@ pub(crate) mod tests; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; +pub mod migration; + pub trait WeightInfo { fn receive_upward_messages(i: u32) -> Weight; } @@ -80,20 +86,8 @@ impl WeightInfo for () { /// `configuration` pallet to check these values before setting. pub const MAX_UPWARD_MESSAGE_SIZE_BOUND: u32 = 128 * 1024; -/// A bitfield signed by a validator indicating that it is keeping its piece of the erasure-coding -/// for any backed candidates referred to by a `1` bit available. -/// -/// The bitfield's signature should be checked at the point of submission. Afterwards it can be -/// dropped. -#[derive(Encode, Decode, TypeInfo)] -#[cfg_attr(test, derive(Debug))] -pub struct AvailabilityBitfieldRecord { - bitfield: AvailabilityBitfield, // one bit per core. - submitted_at: N, // for accounting, as meaning of bits may change over time. -} - /// A backed candidate pending availability. 
-#[derive(Encode, Decode, PartialEq, TypeInfo)] +#[derive(Encode, Decode, PartialEq, TypeInfo, Clone)] #[cfg_attr(test, derive(Debug))] pub struct CandidatePendingAvailability { /// The availability core this is assigned to. @@ -102,6 +96,8 @@ pub struct CandidatePendingAvailability { hash: CandidateHash, /// The candidate descriptor. descriptor: CandidateDescriptor, + /// The candidate commitments. + commitments: CandidateCommitments, /// The received availability votes. One bit per validator. availability_votes: BitVec, /// The backers of the candidate pending availability. @@ -121,8 +117,11 @@ impl CandidatePendingAvailability { } /// Get the relay-chain block number this was backed in. - pub(crate) fn backed_in_number(&self) -> &N { - &self.backed_in_number + pub(crate) fn backed_in_number(&self) -> N + where + N: Clone, + { + self.backed_in_number.clone() } /// Get the core index. @@ -140,6 +139,11 @@ impl CandidatePendingAvailability { &self.descriptor } + /// Get the candidate commitments. + pub(crate) fn candidate_commitments(&self) -> &CandidateCommitments { + &self.commitments + } + /// Get the candidate's relay parent's number. pub(crate) fn relay_parent_number(&self) -> N where @@ -148,11 +152,22 @@ impl CandidatePendingAvailability { self.relay_parent_number.clone() } + /// Get the candidate backing group. + pub(crate) fn backing_group(&self) -> GroupIndex { + self.backing_group + } + + /// Get the candidate's backers. + pub(crate) fn backers(&self) -> &BitVec { + &self.backers + } + #[cfg(any(feature = "runtime-benchmarks", test))] pub(crate) fn new( core: CoreIndex, hash: CandidateHash, descriptor: CandidateDescriptor, + commitments: CandidateCommitments, availability_votes: BitVec, backers: BitVec, relay_parent_number: N, @@ -163,6 +178,7 @@ impl CandidatePendingAvailability { core, hash, descriptor, + commitments, availability_votes, backers, relay_parent_number, @@ -253,8 +269,10 @@ pub type MaxUmpMessageLenOf = pub mod pallet { use super::*; + const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] #[pallet::without_storage_info] + #[pallet::storage_version(STORAGE_VERSION)] pub struct Pallet(_); #[pallet::config] @@ -297,30 +315,10 @@ pub mod pallet { #[pallet::error] pub enum Error { - /// Validator indices are out of order or contains duplicates. - UnsortedOrDuplicateValidatorIndices, - /// Dispute statement sets are out of order or contain duplicates. - UnsortedOrDuplicateDisputeStatementSet, - /// Backed candidates are out of order (core index) or contain duplicates. - UnsortedOrDuplicateBackedCandidates, - /// A different relay parent was provided compared to the on-chain stored one. - UnexpectedRelayParent, - /// Availability bitfield has unexpected size. - WrongBitfieldSize, - /// Bitfield consists of zeros only. - BitfieldAllZeros, - /// Multiple bitfields submitted by same validator or validators out of order by index. - BitfieldDuplicateOrUnordered, /// Validator index out of bounds. ValidatorIndexOutOfBounds, - /// Invalid signature - InvalidBitfieldSignature, /// Candidate submitted but para not scheduled. UnscheduledCandidate, - /// Candidate scheduled despite pending candidate already existing for the para. - CandidateScheduledBeforeParaFree, - /// Scheduled cores out of order. - ScheduledOutOfOrder, /// Head data exceeds the configured maximum. HeadDataTooLarge, /// Code upgrade prematurely. 
@@ -356,31 +354,22 @@ pub mod pallet {
 		/// The `para_head` hash in the candidate descriptor doesn't match the hash of the actual
 		/// para head in the commitments.
 		ParaHeadMismatch,
-		/// A bitfield that references a freed core,
-		/// either intentionally or as part of a concluded
-		/// invalid dispute.
-		BitfieldReferencesFreedCore,
 	}
 
-	/// The latest bitfield for each validator, referred to by their index in the validator set.
-	#[pallet::storage]
-	pub(crate) type AvailabilityBitfields =
-		StorageMap<_, Twox64Concat, ValidatorIndex, AvailabilityBitfieldRecord>>;
-
-	/// Candidates pending availability by `ParaId`.
+	/// Candidates pending availability by `ParaId`. They form a chain starting from the latest
+	/// included head of the para.
+	/// Use a different prefix post-migration to v1, since the v0 `PendingAvailability` storage
+	/// would otherwise have the exact same prefix which could cause undefined behaviour when doing
+	/// the migration.
 	#[pallet::storage]
+	#[pallet::storage_prefix = "V1"]
 	pub(crate) type PendingAvailability = StorageMap<
 		_,
 		Twox64Concat,
 		ParaId,
-		CandidatePendingAvailability>,
+		VecDeque>>,
 	>;
 
-	/// The commitments of candidates pending availability, by `ParaId`.
-	#[pallet::storage]
-	pub(crate) type PendingAvailabilityCommitments =
-		StorageMap<_, Twox64Concat, ParaId, CandidateCommitments>;
-
 	#[pallet::call]
 	impl Pallet {}
 }
@@ -469,9 +458,7 @@ impl Pallet {
 	) {
 		// unlike most drain methods, drained elements are not cleared on `Drop` of the iterator
 		// and require consumption.
-		for _ in >::drain() {}
 		for _ in >::drain() {}
-		for _ in >::drain() {}
 
 		Self::cleanup_outgoing_ump_dispatch_queues(outgoing_paras);
 	}
@@ -490,27 +477,18 @@ impl Pallet {
 	///
 	/// Bitfields are expected to have been sanitized already. E.g. via `sanitize_bitfields`!
 	///
-	/// Updates storage items `PendingAvailability` and `AvailabilityBitfields`.
+	/// Updates the `PendingAvailability` storage item.
 	///
 	/// Returns a `Vec` of `CandidateHash`es and their respective `AvailabilityCore`s that became
 	/// available, and cores free.
-	pub(crate) fn update_pending_availability_and_get_freed_cores(
-		expected_bits: usize,
+	pub(crate) fn update_pending_availability_and_get_freed_cores(
 		validators: &[ValidatorId],
 		signed_bitfields: SignedAvailabilityBitfields,
-		core_lookup: F,
-	) -> Vec<(CoreIndex, CandidateHash)>
-	where
-		F: Fn(CoreIndex) -> Option,
-	{
-		let mut assigned_paras_record = (0..expected_bits)
-			.map(|bit_index| core_lookup(CoreIndex::from(bit_index as u32)))
-			.map(|opt_para_id| {
-				opt_para_id.map(|para_id| (para_id, PendingAvailability::::get(&para_id)))
-			})
-			.collect::>();
+	) -> Vec<(CoreIndex, CandidateHash)> {
+		let threshold = availability_threshold(validators.len());
+
+		let mut votes_per_core: BTreeMap> = BTreeMap::new();
 
-		let now = >::block_number();
 		for (checked_bitfield, validator_index) in
 			signed_bitfields.into_iter().map(|signed_bitfield| {
 				let validator_idx = signed_bitfield.validator_index();
@@ -518,310 +496,275 @@ impl Pallet {
 				(checked_bitfield, validator_idx)
 			}) {
 			for (bit_idx, _) in checked_bitfield.0.iter().enumerate().filter(|(_, is_av)| **is_av) {
-				let pending_availability = if let Some((_, pending_availability)) =
-					assigned_paras_record[bit_idx].as_mut()
-				{
-					pending_availability
-				} else {
-					// For honest validators, this happens in case of unoccupied cores,
-					// which in turn happens in case of a disputed candidate.
-					// A malicious one might include arbitrary indices, but they are represented
-					// by `None` values and will be sorted out in the next if case.
-					continue
-				};
-
-				// defensive check - this is constructed by loading the availability bitfield
-				// record, which is always `Some` if the core is occupied - that's why we're here.
-				let validator_index = validator_index.0 as usize;
-				if let Some(mut bit) =
-					pending_availability.as_mut().and_then(|candidate_pending_availability| {
-						candidate_pending_availability.availability_votes.get_mut(validator_index)
-					}) {
-					*bit = true;
-				}
+				let core_index = CoreIndex(bit_idx as u32);
+				votes_per_core
+					.entry(core_index)
+					.or_insert_with(|| BTreeSet::new())
+					.insert(validator_index);
 			}
-
-			let record =
-				AvailabilityBitfieldRecord { bitfield: checked_bitfield, submitted_at: now };
-
-			>::insert(&validator_index, record);
 		}
 
-		let threshold = availability_threshold(validators.len());
+		let mut freed_cores = vec![];
+
+		let pending_paraids: Vec<_> = >::iter_keys().collect();
+		for paraid in pending_paraids {
+			>::mutate(paraid, |candidates| {
+				if let Some(candidates) = candidates {
+					let mut last_enacted_index: Option = None;
+
+					for (candidate_index, candidate) in candidates.iter_mut().enumerate() {
+						if let Some(validator_indices) = votes_per_core.remove(&candidate.core) {
+							for validator_index in validator_indices.iter() {
+								// defensive check - the bitfields were already sanitized,
+								// so the validator index is expected to be within the
+								// bounds of the availability votes bitfield.
+								if let Some(mut bit) =
+									candidate.availability_votes.get_mut(validator_index.0 as usize)
+								{
+									*bit = true;
+								}
							}
						}

+						// We check for the candidate's availability even if we didn't get any new
+						// bitfields for its core, as it may have already been available at a
+						// previous block but wasn't enacted due to its predecessors not being
+						// available.
+						if candidate.availability_votes.count_ones() >= threshold {
+							// We can only enact a candidate if we've enacted all of its
+							// predecessors already.
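+							// For example, if the pending candidates are [c0, c1, c2] and only
+							// c1 and c2 have reached the availability threshold, none of them
+							// can be enacted yet, since c0 is still missing.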
+							let can_enact = if candidate_index == 0 {
+								last_enacted_index == None
+							} else {
+								let prev_candidate_index = usize::try_from(candidate_index - 1)
+									.expect("Previous `if` would have caught a 0 candidate index.");
+								matches!(last_enacted_index, Some(old_index) if old_index == prev_candidate_index)
+							};
+
+							if can_enact {
+								last_enacted_index = Some(candidate_index);
+							}
+						}
+					}
 
-		let mut freed_cores = Vec::with_capacity(expected_bits);
-		for (para_id, pending_availability) in assigned_paras_record
-			.into_iter()
-			.flatten()
-			.filter_map(|(id, p)| p.map(|p| (id, p)))
-		{
-			if pending_availability.availability_votes.count_ones() >= threshold {
-				>::remove(&para_id);
-				let commitments = match PendingAvailabilityCommitments::::take(&para_id) {
-					Some(commitments) => commitments,
-					None => {
-						log::warn!(
-							target: LOG_TARGET,
-							"Inclusion::process_bitfields: PendingAvailability and PendingAvailabilityCommitments
-							are out of sync, did someone mess with the storage?",
-						);
-						continue
-					},
-				};
-
-				let receipt = CommittedCandidateReceipt {
-					descriptor: pending_availability.descriptor,
-					commitments,
-				};
-				let _weight = Self::enact_candidate(
-					pending_availability.relay_parent_number,
-					receipt,
-					pending_availability.backers,
-					pending_availability.availability_votes,
-					pending_availability.core,
-					pending_availability.backing_group,
-				);
-
-				freed_cores.push((pending_availability.core, pending_availability.hash));
-			} else {
-				>::insert(&para_id, &pending_availability);
-			}
+					// Trim the pending availability candidates storage and enact candidates of this
+					// para now.
+					if let Some(last_enacted_index) = last_enacted_index {
+						let evicted_candidates = candidates.drain(0..=last_enacted_index);
+						for candidate in evicted_candidates {
+							freed_cores.push((candidate.core, candidate.hash));
+
+							let receipt = CommittedCandidateReceipt {
+								descriptor: candidate.descriptor,
+								commitments: candidate.commitments,
+							};
+							let _weight = Self::enact_candidate(
+								candidate.relay_parent_number,
+								receipt,
+								candidate.backers,
+								candidate.availability_votes,
+								candidate.core,
+								candidate.backing_group,
+							);
+						}
+					}
+				}
+			});
 		}
 
 		freed_cores
 	}
 
-	/// Process candidates that have been backed. Provide the relay storage root, a set of
-	/// candidates and scheduled cores.
+	/// Process candidates that have been backed. Provide a set of
+	/// candidates along with their scheduled cores.
 	///
-	/// Both should be sorted ascending by core index, and the candidates should be a subset of
-	/// scheduled cores. If these conditions are not met, the execution of the function fails.
+	/// Candidates of the same paraid should be sorted according to their dependency order (they
+	/// should form a chain). If this condition is not met, this function will return an error.
+	/// (This really should not happen here, if the candidates were properly sanitised in
+	/// `paras_inherent`).
	pub(crate) fn process_candidates(
 		allowed_relay_parents: &AllowedRelayParentsTracker>,
-		candidates: Vec<(BackedCandidate, CoreIndex)>,
+		candidates: &BTreeMap, CoreIndex)>>,
 		group_validators: GV,
 		core_index_enabled: bool,
 	) -> Result, DispatchError>
 	where
 		GV: Fn(GroupIndex) -> Option>,
 	{
-		let now = >::block_number();
-
 		if candidates.is_empty() {
 			return Ok(ProcessedCandidates::default())
 		}
 
-		let minimum_backing_votes = configuration::Pallet::::config().minimum_backing_votes;
+		let now = >::block_number();
 		let validators = shared::Pallet::::active_validator_keys();
 
 		// Collect candidate receipts with backers.
 		let mut candidate_receipt_with_backing_validator_indices =
 			Vec::with_capacity(candidates.len());
+		let mut core_indices = Vec::with_capacity(candidates.len());
 
-		// Do all checks before writing storage.
-		let core_indices_and_backers = {
-			let mut core_indices_and_backers = Vec::with_capacity(candidates.len());
-			let mut last_core = None;
-
-			let mut check_assignment_in_order = |core_idx| -> DispatchResult {
-				ensure!(
-					last_core.map_or(true, |core| core_idx > core),
-					Error::::ScheduledOutOfOrder,
-				);
-
-				last_core = Some(core_idx);
-				Ok(())
+		for (para_id, para_candidates) in candidates {
+			let mut latest_head_data = match Self::para_latest_head_data(para_id) {
+				None => {
+					defensive!("Latest included head data for paraid {:?} is None", para_id);
+					continue
+				},
+				Some(latest_head_data) => latest_head_data,
 			};
 
-			// We combine an outer loop over candidates with an inner loop over the scheduled,
-			// where each iteration of the outer loop picks up at the position
-			// in scheduled just after the past iteration left off.
-			//
-			// If the candidates appear in the same order as they appear in `scheduled`,
-			// then they should always be found. If the end of `scheduled` is reached,
-			// then the candidate was either not scheduled or out-of-order.
-			//
-			// In the meantime, we do certain sanity checks on the candidates and on the scheduled
-			// list.
-			for (candidate_idx, (backed_candidate, core_index)) in candidates.iter().enumerate() {
-				let relay_parent_hash = backed_candidate.descriptor().relay_parent;
-				let para_id = backed_candidate.descriptor().para_id;
-
-				let prev_context = >::para_most_recent_context(para_id);
-
-				let check_ctx = CandidateCheckContext::::new(prev_context);
-				let signing_context = SigningContext {
-					parent_hash: relay_parent_hash,
-					session_index: shared::Pallet::::session_index(),
-				};
-
-				let relay_parent_number = match check_ctx.verify_backed_candidate(
+			for (candidate, core) in para_candidates.iter() {
+				let candidate_hash = candidate.candidate().hash();
+
+				let check_ctx = CandidateCheckContext::::new(None);
+				let relay_parent_number = check_ctx.verify_backed_candidate(
 					&allowed_relay_parents,
-					candidate_idx,
-					backed_candidate.candidate(),
-				)? {
-					Err(FailedToCreatePVD) => {
-						log::debug!(
-							target: LOG_TARGET,
-							"Failed to create PVD for candidate {}",
-							candidate_idx,
-						);
-						// We don't want to error out here because it will
-						// brick the relay-chain. So we return early without
-						// doing anything.
-						return Ok(ProcessedCandidates::default())
-					},
-					Ok(rpn) => rpn,
-				};
-
-				let (validator_indices, _) =
-					backed_candidate.validator_indices_and_core_index(core_index_enabled);
-
-				log::debug!(
-					target: LOG_TARGET,
-					"Candidate {:?} on {:?},
					core_index_enabled = {}",
-					backed_candidate.hash(),
-					core_index,
-					core_index_enabled
-				);
-
-				check_assignment_in_order(core_index)?;
-
-				let mut backers = bitvec::bitvec![u8, BitOrderLsb0; 0; validators.len()];
-
-				ensure!(
-					>::get(&para_id).is_none() &&
-						>::get(&para_id).is_none(),
-					Error::::CandidateScheduledBeforeParaFree,
-				);
-
-				// The candidate based upon relay parent `N` should be backed by a group
-				// assigned to core at block `N + 1`. Thus, `relay_parent_number + 1`
-				// will always land in the current session.
+					candidate.candidate(),
+					latest_head_data.clone(),
+				)?;
+
+				// The candidate based upon relay parent `N` should be backed by a
+				// group assigned to core at block `N + 1`. Thus,
+				// `relay_parent_number + 1` will always land in the current
+				// session.
 				let group_idx = >::group_assigned_to_core(
-					*core_index,
+					*core,
 					relay_parent_number + One::one(),
 				)
 				.ok_or_else(|| {
 					log::warn!(
 						target: LOG_TARGET,
-						"Failed to compute group index for candidate {}",
-						candidate_idx
+						"Failed to compute group index for candidate {:?}",
+						candidate_hash
 					);
 					Error::::InvalidAssignment
 				})?;
 				let group_vals = group_validators(group_idx).ok_or_else(|| Error::::InvalidGroupIndex)?;
 
-				// check the signatures in the backing and that it is a majority.
-				{
-					let maybe_amount_validated = primitives::check_candidate_backing(
-						backed_candidate.candidate().hash(),
-						backed_candidate.validity_votes(),
-						validator_indices,
-						&signing_context,
-						group_vals.len(),
-						|intra_group_vi| {
-							group_vals
-								.get(intra_group_vi)
-								.and_then(|vi| validators.get(vi.0 as usize))
-								.map(|v| v.clone())
-						},
-					);
-
-					match maybe_amount_validated {
-						Ok(amount_validated) => ensure!(
-							amount_validated >=
-								effective_minimum_backing_votes(
-									group_vals.len(),
-									minimum_backing_votes
-								),
-							Error::::InsufficientBacking,
-						),
-						Err(()) => {
-							Err(Error::::InvalidBacking)?;
-						},
-					}
-
-					let mut backer_idx_and_attestation =
-						Vec::<(ValidatorIndex, ValidityAttestation)>::with_capacity(
-							validator_indices.count_ones(),
-						);
-					let candidate_receipt = backed_candidate.receipt();
-
-					for ((bit_idx, _), attestation) in validator_indices
-						.iter()
-						.enumerate()
-						.filter(|(_, signed)| **signed)
-						.zip(backed_candidate.validity_votes().iter().cloned())
-					{
-						let val_idx =
-							group_vals.get(bit_idx).expect("this query succeeded above; qed");
-						backer_idx_and_attestation.push((*val_idx, attestation));
-
-						backers.set(val_idx.0 as _, true);
+				// Check backing vote count and validity.
+				let (backers, backer_idx_and_attestation) = Self::check_backing_votes(
+					candidate,
+					&validators,
+					group_vals,
+					core_index_enabled,
+				)?;
+
+				// Found a valid candidate.
+				latest_head_data = candidate.candidate().commitments.head_data.clone();
+				candidate_receipt_with_backing_validator_indices
+					.push((candidate.receipt(), backer_idx_and_attestation));
+				core_indices.push((*core, *para_id));
+
+				// Update storage now
+				>::mutate(&para_id, |pending_availability| {
+					let new_candidate = CandidatePendingAvailability {
+						core: *core,
+						hash: candidate_hash,
+						descriptor: candidate.candidate().descriptor.clone(),
+						commitments: candidate.candidate().commitments.clone(),
+						// initialize all availability votes to 0.
+						availability_votes: bitvec::bitvec![u8, BitOrderLsb0; 0; validators.len()],
+						relay_parent_number,
+						backers: backers.to_bitvec(),
+						backed_in_number: now,
+						backing_group: group_idx,
+					};
+
+					if let Some(pending_availability) = pending_availability {
+						pending_availability.push_back(new_candidate);
+					} else {
+						*pending_availability =
+							Some([new_candidate].into_iter().collect::>())
 					}
-					candidate_receipt_with_backing_validator_indices
-						.push((candidate_receipt, backer_idx_and_attestation));
-				}
+				});
 
-				core_indices_and_backers.push((
-					(*core_index, para_id),
-					backers,
+				// Deposit backed event.
+				Self::deposit_event(Event::::CandidateBacked(
+					candidate.candidate().to_plain(),
+					candidate.candidate().commitments.head_data.clone(),
+					*core,
 					group_idx,
-					relay_parent_number,
 				));
 			}
+		}
 
-			core_indices_and_backers
-		};
+		Ok(ProcessedCandidates:: {
+			core_indices,
+			candidate_receipt_with_backing_validator_indices,
+		})
+	}
 
-		// one more sweep for actually writing to storage.
-		let core_indices = core_indices_and_backers.iter().map(|(c, ..)| *c).collect();
-		for ((candidate, _), (core, backers, group, relay_parent_number)) in
-			candidates.into_iter().zip(core_indices_and_backers)
-		{
-			let para_id = candidate.descriptor().para_id;
+	/// Get the latest backed output head data of this para.
+	pub(crate) fn para_latest_head_data(para_id: &ParaId) -> Option {
+		match >::get(para_id).and_then(|pending_candidates| {
+			pending_candidates.back().map(|x| x.commitments.head_data.clone())
+		}) {
+			Some(head_data) => Some(head_data),
+			None => >::para_head(para_id),
+		}
+	}
 
-			// initialize all availability votes to 0.
-			let availability_votes: BitVec =
-				bitvec::bitvec![u8, BitOrderLsb0; 0; validators.len()];
+	fn check_backing_votes(
+		backed_candidate: &BackedCandidate,
+		validators: &[ValidatorId],
+		group_vals: Vec,
+		core_index_enabled: bool,
+	) -> Result<(BitVec, Vec<(ValidatorIndex, ValidityAttestation)>), Error> {
+		let minimum_backing_votes = configuration::Pallet::::config().minimum_backing_votes;
 
-			Self::deposit_event(Event::::CandidateBacked(
-				candidate.candidate().to_plain(),
-				candidate.candidate().commitments.head_data.clone(),
-				core.0,
-				group,
-			));
+		let mut backers = bitvec::bitvec![u8, BitOrderLsb0; 0; validators.len()];
+		let signing_context = SigningContext {
+			parent_hash: backed_candidate.descriptor().relay_parent,
+			session_index: shared::Pallet::::session_index(),
+		};
 
-			let candidate_hash = candidate.candidate().hash();
+		let (validator_indices, _) =
+			backed_candidate.validator_indices_and_core_index(core_index_enabled);
+
+		// check the signatures in the backing and that it is a majority.
+		let maybe_amount_validated = primitives::check_candidate_backing(
+			backed_candidate.candidate().hash(),
+			backed_candidate.validity_votes(),
+			validator_indices,
+			&signing_context,
+			group_vals.len(),
+			|intra_group_vi| {
+				group_vals
+					.get(intra_group_vi)
+					.and_then(|vi| validators.get(vi.0 as usize))
+					.map(|v| v.clone())
+			},
+		);
 
-			let (descriptor, commitments) = (
-				candidate.candidate().descriptor.clone(),
-				candidate.candidate().commitments.clone(),
-			);
+		match maybe_amount_validated {
+			Ok(amount_validated) => ensure!(
+				amount_validated >=
+					effective_minimum_backing_votes(group_vals.len(), minimum_backing_votes),
+				Error::::InsufficientBacking,
+			),
+			Err(()) => {
+				Err(Error::::InvalidBacking)?;
+			},
+		}
 
-			>::insert(
-				&para_id,
-				CandidatePendingAvailability {
-					core: core.0,
-					hash: candidate_hash,
-					descriptor,
-					availability_votes,
-					relay_parent_number,
-					backers: backers.to_bitvec(),
-					backed_in_number: now,
-					backing_group: group,
-				},
+		let mut backer_idx_and_attestation =
+			Vec::<(ValidatorIndex, ValidityAttestation)>::with_capacity(
+				validator_indices.count_ones(),
 			);
-			>::insert(&para_id, commitments);
+
+		for ((bit_idx, _), attestation) in validator_indices
+			.iter()
+			.enumerate()
+			.filter(|(_, signed)| **signed)
+			.zip(backed_candidate.validity_votes().iter().cloned())
+		{
+			let val_idx = group_vals.get(bit_idx).expect("this query succeeded above; qed");
+			backer_idx_and_attestation.push((*val_idx, attestation));
+
+			backers.set(val_idx.0 as _, true);
 		}
 
-		Ok(ProcessedCandidates:: {
-			core_indices,
-			candidate_receipt_with_backing_validator_indices,
-		})
+		Ok((backers, backer_idx_and_attestation))
 	}
 
 	/// Run the acceptance criteria checks on the given candidate commitments.
@@ -1028,110 +971,155 @@ impl Pallet {
 		weight
 	}
 
-	/// Cleans up all paras pending availability that the predicate returns true for.
-	///
-	/// The predicate accepts the index of the core and the block number the core has been occupied
-	/// since (i.e. the block number the candidate was backed at in this fork of the relay chain).
 	///
 	/// Returns a vector of cleaned-up core IDs.
-	pub(crate) fn collect_pending(
-		pred: impl Fn(BlockNumberFor) -> AvailabilityTimeoutStatus>,
-	) -> Vec {
-		let mut cleaned_up_ids = Vec::new();
-		let mut cleaned_up_cores = Vec::new();
-
-		for (para_id, pending_record) in >::iter() {
-			if pred(pending_record.backed_in_number).timed_out {
-				cleaned_up_ids.push(para_id);
-				cleaned_up_cores.push(pending_record.core);
-			}
-		}
-
-		for para_id in cleaned_up_ids {
-			let pending = >::take(&para_id);
-			let commitments = >::take(&para_id);
-
-			if let (Some(pending), Some(commitments)) = (pending, commitments) {
-				// defensive: this should always be true.
-				let candidate = CandidateReceipt {
-					descriptor: pending.descriptor,
-					commitments_hash: commitments.hash(),
-				};
+	pub(crate) fn free_timedout() -> Vec {
+		let timeout_pred = >::availability_timeout_predicate();
+
+		let timed_out: Vec<_> = Self::free_failed_cores(
+			|candidate| timeout_pred(candidate.backed_in_number).timed_out,
+			None,
+		)
+		.collect();
+
+		let mut timed_out_cores = Vec::with_capacity(timed_out.len());
+		for candidate in timed_out.iter() {
+			timed_out_cores.push(candidate.core);
+
+			let receipt = CandidateReceipt {
+				descriptor: candidate.descriptor.clone(),
+				commitments_hash: candidate.commitments.hash(),
+			};
 
-				Self::deposit_event(Event::::CandidateTimedOut(
-					candidate,
-					commitments.head_data,
-					pending.core,
-				));
-			}
+			Self::deposit_event(Event::::CandidateTimedOut(
+				receipt,
+				candidate.commitments.head_data.clone(),
+				candidate.core,
+			));
 		}
 
-		cleaned_up_cores
+		timed_out_cores
 	}
 
-	/// Cleans up all paras pending availability that are in the given list of disputed candidates.
+	/// Cleans up all cores pending availability occupied by one of the disputed candidates or which
+	/// are descendants of a disputed candidate.
 	///
-	/// Returns a vector of cleaned-up core IDs.
-	pub(crate) fn collect_disputed(disputed: &BTreeSet) -> Vec {
-		let mut cleaned_up_ids = Vec::new();
-		let mut cleaned_up_cores = Vec::new();
-
-		for (para_id, pending_record) in >::iter() {
-			if disputed.contains(&pending_record.hash) {
-				cleaned_up_ids.push(para_id);
-				cleaned_up_cores.push(pending_record.core);
+	/// Returns a vector of cleaned-up core IDs, along with the evicted candidate hashes.
+	pub(crate) fn free_disputed(
+		disputed: &BTreeSet,
+	) -> Vec<(CoreIndex, CandidateHash)> {
+		Self::free_failed_cores(
+			|candidate| disputed.contains(&candidate.hash),
+			Some(disputed.len()),
+		)
+		.map(|candidate| (candidate.core, candidate.hash))
+		.collect()
+	}
+
+	// Clean up cores whose candidates are deemed failed by the predicate. `pred` returns true if
+	// a candidate is considered failed.
+	// A failed candidate also frees all subsequent cores which hold descendants of said candidate.
+	fn free_failed_cores<
+		P: Fn(&CandidatePendingAvailability>) -> bool,
+	>(
+		pred: P,
+		capacity_hint: Option,
+	) -> impl Iterator>> {
+		let mut earliest_dropped_indices: BTreeMap = BTreeMap::new();
+
+		for (para_id, pending_candidates) in >::iter() {
+			// We assume that pending candidates are stored in dependency order. So we need to store
+			// the earliest dropped candidate. All others that follow will get freed as well.
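+			// For example, if the pending candidates of a para are [c0, c1, c2] and the
+			// predicate only matches c1, then both c1 and c2 will be freed, while c0
+			// stays pending.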
+			let mut earliest_dropped_idx = None;
+			for (index, candidate) in pending_candidates.iter().enumerate() {
+				if pred(candidate) {
+					earliest_dropped_idx = Some(index);
+					// Since we're looping the candidates in dependency order, we've found the
+					// earliest failed index for this paraid.
+					break;
+				}
+			}
+
+			if let Some(earliest_dropped_idx) = earliest_dropped_idx {
+				earliest_dropped_indices.insert(para_id, earliest_dropped_idx);
 			}
 		}
 
-		for para_id in cleaned_up_ids {
-			let _ = >::take(&para_id);
-			let _ = >::take(&para_id);
+		let mut cleaned_up_cores =
+			if let Some(capacity) = capacity_hint { Vec::with_capacity(capacity) } else { vec![] };
+
+		for (para_id, earliest_dropped_idx) in earliest_dropped_indices {
+			// Do cleanups and record the cleaned up cores
+			>::mutate(&para_id, |record| {
+				if let Some(record) = record {
+					let cleaned_up = record.drain(earliest_dropped_idx..);
+					cleaned_up_cores.extend(cleaned_up);
+				}
+			});
 		}
 
-		cleaned_up_cores
+		cleaned_up_cores.into_iter()
 	}
 
-	/// Forcibly enact the candidate with the given ID as though it had been deemed available
-	/// by bitfields.
+	/// Forcibly enact the pending candidates of the given paraid as though they had been deemed
+	/// available by bitfields.
 	///
 	/// Is a no-op if there is no candidate pending availability for this para-id.
-	/// This should generally not be used but it is useful during execution of Runtime APIs,
+	/// If there are multiple candidates pending availability for this para-id, it will enact all of
+	/// them. This should generally not be used but it is useful during execution of Runtime APIs,
 	/// where the changes to the state are expected to be discarded directly after.
 	pub(crate) fn force_enact(para: ParaId) {
-		let pending = >::take(&para);
-		let commitments = >::take(&para);
-
-		if let (Some(pending), Some(commitments)) = (pending, commitments) {
-			let candidate =
-				CommittedCandidateReceipt { descriptor: pending.descriptor, commitments };
-
-			Self::enact_candidate(
-				pending.relay_parent_number,
-				candidate,
-				pending.backers,
-				pending.availability_votes,
-				pending.core,
-				pending.backing_group,
-			);
-		}
+		>::mutate(&para, |candidates| {
+			if let Some(candidates) = candidates {
+				for candidate in candidates.drain(..) {
+					let receipt = CommittedCandidateReceipt {
+						descriptor: candidate.descriptor,
+						commitments: candidate.commitments,
+					};
+
+					Self::enact_candidate(
+						candidate.relay_parent_number,
+						receipt,
+						candidate.backers,
+						candidate.availability_votes,
+						candidate.core,
+						candidate.backing_group,
+					);
+				}
+			}
+		});
 	}
 
-	/// Returns the `CommittedCandidateReceipt` pending availability for the para provided, if any.
+	/// Returns the first `CommittedCandidateReceipt` pending availability for the para provided, if
+	/// any.
 	pub(crate) fn candidate_pending_availability(
 		para: ParaId,
 	) -> Option> {
-		>::get(&para)
-			.map(|p| p.descriptor)
-			.and_then(|d| >::get(&para).map(move |c| (d, c)))
-			.map(|(d, c)| CommittedCandidateReceipt { descriptor: d, commitments: c })
+		>::get(&para).and_then(|p| {
+			p.get(0).map(|p| CommittedCandidateReceipt {
+				descriptor: p.descriptor.clone(),
+				commitments: p.commitments.clone(),
+			})
+		})
 	}
 
-	/// Returns the metadata around the candidate pending availability for the
+	/// Returns the metadata around the first candidate pending availability for the
 	/// para provided, if any.
 	pub(crate) fn pending_availability(
 		para: ParaId,
+	) -> Option>> {
+		>::get(&para).and_then(|p| p.get(0).cloned())
+	}
+
+	/// Returns the metadata around the candidate pending availability occupying the supplied core,
+	/// if any.
+	pub(crate) fn pending_availability_with_core(
+		para: ParaId,
+		core: CoreIndex,
 	) -> Option>> {
 		>::get(&para)
+			.and_then(|p| p.iter().find(|c| c.core == core).cloned())
 	}
 }
 
@@ -1182,10 +1170,6 @@ pub(crate) struct CandidateCheckContext {
 	prev_context: Option>,
 }
 
-/// An error indicating that creating Persisted Validation Data failed
-/// while checking a candidate's validity.
-pub(crate) struct FailedToCreatePVD;
-
 impl CandidateCheckContext {
 	pub(crate) fn new(prev_context: Option>) -> Self {
 		Self { config: >::config(), prev_context }
 	}
@@ -1203,9 +1187,9 @@ impl CandidateCheckContext {
 	pub(crate) fn verify_backed_candidate(
 		&self,
 		allowed_relay_parents: &AllowedRelayParentsTracker>,
-		candidate_idx: usize,
 		backed_candidate_receipt: &CommittedCandidateReceipt<::Hash>,
-	) -> Result, FailedToCreatePVD>, Error> {
+		parent_head_data: HeadData,
+	) -> Result, Error> {
 		let para_id = backed_candidate_receipt.descriptor().para_id;
 		let relay_parent = backed_candidate_receipt.descriptor().relay_parent;
 
@@ -1218,16 +1202,11 @@ impl CandidateCheckContext {
 		};
 
 		{
-			let persisted_validation_data = match crate::util::make_persisted_validation_data::(
-				para_id,
+			let persisted_validation_data = make_persisted_validation_data_with_parent::(
 				relay_parent_number,
 				relay_parent_storage_root,
-			)
-			.defensive_proof("the para is registered")
-			{
-				Some(l) => l,
-				None => return Ok(Err(FailedToCreatePVD)),
-			};
+				parent_head_data,
+			);
 
 			let expected = persisted_validation_data.hash();
 
@@ -1268,13 +1247,13 @@ impl CandidateCheckContext {
 		) {
 			log::debug!(
 				target: LOG_TARGET,
-				"Validation outputs checking during inclusion of a candidate {} for parachain `{}` failed",
-				candidate_idx,
+				"Validation outputs checking during inclusion of a candidate {:?} for parachain `{}` failed",
+				backed_candidate_receipt.hash(),
 				u32::from(para_id),
 			);
 			Err(err.strip_into_dispatch_err::())?;
 		};
-		Ok(Ok(relay_parent_number))
+		Ok(relay_parent_number)
 	}
 
 	/// Check the given outputs after candidate validation on whether it passes the acceptance
diff --git a/polkadot/runtime/parachains/src/inclusion/tests.rs b/polkadot/runtime/parachains/src/inclusion/tests.rs
index 3fe7d7f0c7d..5ab3a13324d 100644
--- a/polkadot/runtime/parachains/src/inclusion/tests.rs
+++ b/polkadot/runtime/parachains/src/inclusion/tests.rs
@@ -27,7 +27,7 @@ use crate::{
 	shared::AllowedRelayParentsTracker,
 };
 use primitives::{
-	effective_minimum_backing_votes, SignedAvailabilityBitfields,
+	effective_minimum_backing_votes, AvailabilityBitfield, SignedAvailabilityBitfields,
 	UncheckedSignedAvailabilityBitfields,
 };
 
@@ -360,81 +360,288 @@ fn simple_sanitize_bitfields(
 }
 
 /// Process a set of already sanitized bitfields.
pub(crate) fn process_bitfields( - expected_bits: usize, signed_bitfields: SignedAvailabilityBitfields, - core_lookup: impl Fn(CoreIndex) -> Option, ) -> Vec<(CoreIndex, CandidateHash)> { let validators = shared::Pallet::::active_validator_keys(); - ParaInclusion::update_pending_availability_and_get_freed_cores::<_>( - expected_bits, + ParaInclusion::update_pending_availability_and_get_freed_cores( &validators[..], signed_bitfields, - core_lookup, ) } #[test] -fn collect_pending_cleans_up_pending() { +fn free_timedout() { let chain_a = ParaId::from(1_u32); let chain_b = ParaId::from(2_u32); - let thread_a = ParaId::from(3_u32); + let chain_c = ParaId::from(3_u32); + let chain_d = ParaId::from(4_u32); + let chain_e = ParaId::from(5_u32); + let chain_f = ParaId::from(6_u32); + let thread_a = ParaId::from(7_u32); let paras = vec![ (chain_a, ParaKind::Parachain), (chain_b, ParaKind::Parachain), + (chain_c, ParaKind::Parachain), + (chain_d, ParaKind::Parachain), + (chain_e, ParaKind::Parachain), + (chain_f, ParaKind::Parachain), (thread_a, ParaKind::Parathread), ]; let mut config = genesis_config(paras); config.configuration.config.scheduler_params.group_rotation_frequency = 3; new_test_ext(config).execute_with(|| { - let default_candidate = TestCandidateBuilder::default().build(); - >::insert( - chain_a, + let timed_out_cores = ParaInclusion::free_timedout(); + assert!(timed_out_cores.is_empty()); + + let make_candidate = |core_index: u32, timed_out: bool| { + let default_candidate = TestCandidateBuilder::default().build(); + let backed_in_number = if timed_out { 0 } else { 5 }; + CandidatePendingAvailability { - core: CoreIndex::from(0), + core: CoreIndex::from(core_index), hash: default_candidate.hash(), descriptor: default_candidate.descriptor.clone(), availability_votes: default_availability_votes(), relay_parent_number: 0, - backed_in_number: 0, + backed_in_number, backers: default_backing_bitfield(), - backing_group: GroupIndex::from(0), - }, - ); - PendingAvailabilityCommitments::::insert( + backing_group: GroupIndex::from(core_index), + commitments: default_candidate.commitments.clone(), + } + }; + + >::insert( chain_a, - default_candidate.commitments.clone(), + [make_candidate(0, true)].into_iter().collect::>(), ); >::insert( &chain_b, + [make_candidate(1, false)].into_iter().collect::>(), + ); + + // 2 chained candidates. The first one is timed out. The other will be evicted also. + let mut c_candidates = VecDeque::new(); + c_candidates.push_back(make_candidate(2, true)); + c_candidates.push_back(make_candidate(3, false)); + + >::insert(&chain_c, c_candidates); + + // 2 chained candidates. All are timed out. + let mut d_candidates = VecDeque::new(); + d_candidates.push_back(make_candidate(4, true)); + d_candidates.push_back(make_candidate(5, true)); + + >::insert(&chain_d, d_candidates); + + // 3 chained candidates. The second one is timed out. The first one will remain in place. + // With the current time out predicate this scenario is impossible. But this is not a + // concern for this module. + let mut e_candidates = VecDeque::new(); + e_candidates.push_back(make_candidate(6, false)); + e_candidates.push_back(make_candidate(7, true)); + e_candidates.push_back(make_candidate(8, false)); + + >::insert(&chain_e, e_candidates); + + // 3 chained candidates, none are timed out. 
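+		// They should all remain in place.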
+ let mut f_candidates = VecDeque::new(); + f_candidates.push_back(make_candidate(9, false)); + f_candidates.push_back(make_candidate(10, false)); + f_candidates.push_back(make_candidate(11, false)); + + >::insert(&chain_f, f_candidates); + + run_to_block(5, |_| None); + + assert_eq!(>::get(&chain_a).unwrap().len(), 1); + assert_eq!(>::get(&chain_b).unwrap().len(), 1); + assert_eq!(>::get(&chain_c).unwrap().len(), 2); + assert_eq!(>::get(&chain_d).unwrap().len(), 2); + assert_eq!(>::get(&chain_e).unwrap().len(), 3); + assert_eq!(>::get(&chain_f).unwrap().len(), 3); + + let timed_out_cores = ParaInclusion::free_timedout(); + + assert_eq!( + timed_out_cores, + vec![ + CoreIndex(0), + CoreIndex(2), + CoreIndex(3), + CoreIndex(4), + CoreIndex(5), + CoreIndex(7), + CoreIndex(8), + ] + ); + + assert!(>::get(&chain_a).unwrap().is_empty()); + assert_eq!(>::get(&chain_b).unwrap().len(), 1); + assert!(>::get(&chain_c).unwrap().is_empty()); + assert!(>::get(&chain_d).unwrap().is_empty()); + assert_eq!( + >::get(&chain_e) + .unwrap() + .into_iter() + .map(|c| c.core) + .collect::>(), + vec![CoreIndex(6)] + ); + assert_eq!( + >::get(&chain_f) + .unwrap() + .into_iter() + .map(|c| c.core) + .collect::>(), + vec![CoreIndex(9), CoreIndex(10), CoreIndex(11)] + ); + }); +} + +#[test] +fn free_disputed() { + let chain_a = ParaId::from(1_u32); + let chain_b = ParaId::from(2_u32); + let chain_c = ParaId::from(3_u32); + let chain_d = ParaId::from(4_u32); + let chain_e = ParaId::from(5_u32); + let chain_f = ParaId::from(6_u32); + let thread_a = ParaId::from(7_u32); + + let paras = vec![ + (chain_a, ParaKind::Parachain), + (chain_b, ParaKind::Parachain), + (chain_c, ParaKind::Parachain), + (chain_d, ParaKind::Parachain), + (chain_e, ParaKind::Parachain), + (chain_f, ParaKind::Parachain), + (thread_a, ParaKind::Parathread), + ]; + let mut config = genesis_config(paras); + config.configuration.config.scheduler_params.group_rotation_frequency = 3; + new_test_ext(config).execute_with(|| { + let disputed_cores = ParaInclusion::free_disputed(&BTreeSet::new()); + assert!(disputed_cores.is_empty()); + + let disputed_cores = ParaInclusion::free_disputed( + &[CandidateHash::default()].into_iter().collect::>(), + ); + assert!(disputed_cores.is_empty()); + + let make_candidate = |core_index: u32| { + let default_candidate = TestCandidateBuilder::default().build(); + CandidatePendingAvailability { - core: CoreIndex::from(1), - hash: default_candidate.hash(), - descriptor: default_candidate.descriptor, + core: CoreIndex::from(core_index), + hash: CandidateHash(Hash::from_low_u64_be(core_index as _)), + descriptor: default_candidate.descriptor.clone(), availability_votes: default_availability_votes(), relay_parent_number: 0, - backed_in_number: 5, + backed_in_number: 0, backers: default_backing_bitfield(), - backing_group: GroupIndex::from(1), - }, + backing_group: GroupIndex::from(core_index), + commitments: default_candidate.commitments.clone(), + } + }; + + // Disputed + >::insert( + chain_a, + [make_candidate(0)].into_iter().collect::>(), + ); + + // Not disputed. + >::insert( + &chain_b, + [make_candidate(1)].into_iter().collect::>(), ); - PendingAvailabilityCommitments::::insert(chain_b, default_candidate.commitments); + + // 2 chained candidates. The first one is disputed. The other will be evicted also. + let mut c_candidates = VecDeque::new(); + c_candidates.push_back(make_candidate(2)); + c_candidates.push_back(make_candidate(3)); + + >::insert(&chain_c, c_candidates); + + // 2 chained candidates. All are disputed. 
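+		// Both of them will be freed.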
+ let mut d_candidates = VecDeque::new(); + d_candidates.push_back(make_candidate(4)); + d_candidates.push_back(make_candidate(5)); + + >::insert(&chain_d, d_candidates); + + // 3 chained candidates. The second one is disputed. The first one will remain in place. + let mut e_candidates = VecDeque::new(); + e_candidates.push_back(make_candidate(6)); + e_candidates.push_back(make_candidate(7)); + e_candidates.push_back(make_candidate(8)); + + >::insert(&chain_e, e_candidates); + + // 3 chained candidates, none are disputed. + let mut f_candidates = VecDeque::new(); + f_candidates.push_back(make_candidate(9)); + f_candidates.push_back(make_candidate(10)); + f_candidates.push_back(make_candidate(11)); + + >::insert(&chain_f, f_candidates); run_to_block(5, |_| None); - assert!(>::get(&chain_a).is_some()); - assert!(>::get(&chain_b).is_some()); - assert!(>::get(&chain_a).is_some()); - assert!(>::get(&chain_b).is_some()); + assert_eq!(>::get(&chain_a).unwrap().len(), 1); + assert_eq!(>::get(&chain_b).unwrap().len(), 1); + assert_eq!(>::get(&chain_c).unwrap().len(), 2); + assert_eq!(>::get(&chain_d).unwrap().len(), 2); + assert_eq!(>::get(&chain_e).unwrap().len(), 3); + assert_eq!(>::get(&chain_f).unwrap().len(), 3); + + let disputed_candidates = [ + CandidateHash(Hash::from_low_u64_be(0)), + CandidateHash(Hash::from_low_u64_be(2)), + CandidateHash(Hash::from_low_u64_be(4)), + CandidateHash(Hash::from_low_u64_be(5)), + CandidateHash(Hash::from_low_u64_be(7)), + ] + .into_iter() + .collect::>(); + let disputed_cores = ParaInclusion::free_disputed(&disputed_candidates); - ParaInclusion::collect_pending(Scheduler::availability_timeout_predicate()); + assert_eq!( + disputed_cores.into_iter().map(|(core, _)| core).collect::>(), + vec![ + CoreIndex(0), + CoreIndex(2), + CoreIndex(3), + CoreIndex(4), + CoreIndex(5), + CoreIndex(7), + CoreIndex(8), + ] + ); - assert!(>::get(&chain_a).is_none()); - assert!(>::get(&chain_b).is_some()); - assert!(>::get(&chain_a).is_none()); - assert!(>::get(&chain_b).is_some()); + assert!(>::get(&chain_a).unwrap().is_empty()); + assert_eq!(>::get(&chain_b).unwrap().len(), 1); + assert!(>::get(&chain_c).unwrap().is_empty()); + assert!(>::get(&chain_d).unwrap().is_empty()); + assert_eq!( + >::get(&chain_e) + .unwrap() + .into_iter() + .map(|c| c.core) + .collect::>(), + vec![CoreIndex(6)] + ); + assert_eq!( + >::get(&chain_f) + .unwrap() + .into_iter() + .map(|c| c.core) + .collect::>(), + vec![CoreIndex(9), CoreIndex(10), CoreIndex(11)] + ); }); } @@ -474,14 +681,6 @@ fn bitfield_checks() { let signing_context = SigningContext { parent_hash: System::parent_hash(), session_index: 5 }; - let core_lookup = |core| match core { - core if core == CoreIndex::from(0) => Some(chain_a), - core if core == CoreIndex::from(1) => Some(chain_b), - core if core == CoreIndex::from(2) => Some(thread_a), - core if core == CoreIndex::from(3) => None, // for the expected_cores() + 1 test below. 
- _ => panic!("out of bounds for testing"), - }; - // too many bits in bitfield { let mut bare_bitfield = default_bitfield(); @@ -550,7 +749,7 @@ fn bitfield_checks() { ); assert_eq!(checked_bitfields.len(), 1, "No bitfields should have been filtered!"); - let x = process_bitfields(expected_bits(), checked_bitfields, core_lookup); + let x = process_bitfields(checked_bitfields); assert!(x.is_empty(), "No core should be freed."); } @@ -571,7 +770,7 @@ fn bitfield_checks() { ); assert_eq!(checked_bitfields.len(), 1, "No bitfields should have been filtered!"); - let x = process_bitfields(expected_bits(), checked_bitfields, core_lookup); + let x = process_bitfields(checked_bitfields); assert!(x.is_empty(), "No core should be freed."); } @@ -579,12 +778,10 @@ fn bitfield_checks() { { let mut bare_bitfield = default_bitfield(); - assert_eq!(core_lookup(CoreIndex::from(0)), Some(chain_a)); - let default_candidate = TestCandidateBuilder::default().build(); >::insert( chain_a, - CandidatePendingAvailability { + [CandidatePendingAvailability { core: CoreIndex::from(0), hash: default_candidate.hash(), descriptor: default_candidate.descriptor, @@ -593,9 +790,11 @@ fn bitfield_checks() { backed_in_number: 0, backers: default_backing_bitfield(), backing_group: GroupIndex::from(0), - }, + commitments: default_candidate.commitments, + }] + .into_iter() + .collect::>(), ); - PendingAvailabilityCommitments::::insert(chain_a, default_candidate.commitments); *bare_bitfield.0.get_mut(0).unwrap() = true; let signed = sign_bitfield( @@ -613,53 +812,10 @@ fn bitfield_checks() { ); assert_eq!(checked_bitfields.len(), 1, "No bitfields should have been filtered!"); - let x = process_bitfields(expected_bits(), checked_bitfields, core_lookup); + let x = process_bitfields(checked_bitfields); assert!(x.is_empty(), "No core should be freed."); >::remove(chain_a); - PendingAvailabilityCommitments::::remove(chain_a); - } - - // bitfield signed with pending bit signed, but no commitments. 
- { - let mut bare_bitfield = default_bitfield(); - - assert_eq!(core_lookup(CoreIndex::from(0)), Some(chain_a)); - - let default_candidate = TestCandidateBuilder::default().build(); - >::insert( - chain_a, - CandidatePendingAvailability { - core: CoreIndex::from(0), - hash: default_candidate.hash(), - descriptor: default_candidate.descriptor, - availability_votes: default_availability_votes(), - relay_parent_number: 0, - backed_in_number: 0, - backers: default_backing_bitfield(), - backing_group: GroupIndex::from(0), - }, - ); - - *bare_bitfield.0.get_mut(0).unwrap() = true; - let signed = sign_bitfield( - &keystore, - &validators[0], - ValidatorIndex(0), - bare_bitfield, - &signing_context, - ); - - let checked_bitfields = simple_sanitize_bitfields( - vec![signed.into()], - DisputedBitfield::zeros(expected_bits()), - expected_bits(), - ); - assert_eq!(checked_bitfields.len(), 1, "No bitfields should have been filtered!"); - - let x = process_bitfields(expected_bits(), checked_bitfields, core_lookup); - // no core is freed - assert!(x.is_empty(), "No core should be freed."); } }); } @@ -673,13 +829,17 @@ fn availability_threshold_is_supermajority() { #[test] fn supermajority_bitfields_trigger_availability() { - let chain_a = ParaId::from(1_u32); - let chain_b = ParaId::from(2_u32); - let thread_a = ParaId::from(3_u32); + let chain_a = ParaId::from(0_u32); + let chain_b = ParaId::from(1_u32); + let chain_c = ParaId::from(2_u32); + let chain_d = ParaId::from(3_u32); + let thread_a = ParaId::from(4_u32); let paras = vec![ (chain_a, ParaKind::Parachain), (chain_b, ParaKind::Parachain), + (chain_c, ParaKind::Parachain), + (chain_d, ParaKind::Parachain), (thread_a, ParaKind::Parathread), ]; let validators = vec![ @@ -688,6 +848,8 @@ fn supermajority_bitfields_trigger_availability() { Sr25519Keyring::Charlie, Sr25519Keyring::Dave, Sr25519Keyring::Ferdie, + Sr25519Keyring::One, + Sr25519Keyring::Two, ]; let keystore: KeystorePtr = Arc::new(LocalKeystore::in_memory()); for validator in validators.iter() { @@ -707,13 +869,7 @@ fn supermajority_bitfields_trigger_availability() { let signing_context = SigningContext { parent_hash: System::parent_hash(), session_index: 5 }; - let core_lookup = |core| match core { - core if core == CoreIndex::from(0) => Some(chain_a), - core if core == CoreIndex::from(1) => Some(chain_b), - core if core == CoreIndex::from(2) => Some(thread_a), - _ => panic!("Core out of bounds for 2 parachains and 1 parathread core."), - }; - + // Chain A only has one candidate pending availability. It will be made available now. let candidate_a = TestCandidateBuilder { para_id: chain_a, head_data: vec![1, 2, 3, 4].into(), @@ -723,7 +879,7 @@ fn supermajority_bitfields_trigger_availability() { >::insert( chain_a, - CandidatePendingAvailability { + [CandidatePendingAvailability { core: CoreIndex::from(0), hash: candidate_a.hash(), descriptor: candidate_a.clone().descriptor, @@ -732,10 +888,13 @@ fn supermajority_bitfields_trigger_availability() { backed_in_number: 0, backers: backing_bitfield(&[3, 4]), backing_group: GroupIndex::from(0), - }, + commitments: candidate_a.clone().commitments, + }] + .into_iter() + .collect::>(), ); - PendingAvailabilityCommitments::::insert(chain_a, candidate_a.clone().commitments); + // Chain B only has one candidate pending availability. It won't be made available now. 
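+	// It will only get 4 of the 5 availability votes needed to meet the threshold.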
 	let candidate_b = TestCandidateBuilder {
 		para_id: chain_b,
 		head_data: vec![5, 6, 7, 8].into(),
 		..Default::default()
 	}
 	.build();
 
 	>::insert(
 		chain_b,
-		CandidatePendingAvailability {
+		[CandidatePendingAvailability {
 			core: CoreIndex::from(1),
 			hash: candidate_b.hash(),
 			descriptor: candidate_b.descriptor,
 			availability_votes: default_availability_votes(),
 			relay_parent_number: 0,
 			backed_in_number: 0,
 			backers: backing_bitfield(&[0, 2]),
 			backing_group: GroupIndex::from(1),
-		},
+			commitments: candidate_b.commitments,
+		}]
+		.into_iter()
+		.collect::>(),
 	);
-	PendingAvailabilityCommitments::::insert(chain_b, candidate_b.commitments);
 
-	// this bitfield signals that a and b are available.
-	let a_and_b_available = {
-		let mut bare_bitfield = default_bitfield();
-		*bare_bitfield.0.get_mut(0).unwrap() = true;
-		*bare_bitfield.0.get_mut(1).unwrap() = true;
+	// Chain C has three candidates pending availability. The first and third candidates will be
+	// made available. Only the first candidate will be evicted from the core and enacted.
+	let candidate_c_1 = TestCandidateBuilder {
+		para_id: chain_c,
+		head_data: vec![7, 8].into(),
+		..Default::default()
+	}
+	.build();
+	let candidate_c_2 = TestCandidateBuilder {
+		para_id: chain_c,
+		head_data: vec![9, 10].into(),
+		..Default::default()
+	}
+	.build();
+	let candidate_c_3 = TestCandidateBuilder {
+		para_id: chain_c,
+		head_data: vec![11, 12].into(),
+		..Default::default()
+	}
+	.build();
 
-		bare_bitfield
-	};
+	let mut c_candidates = VecDeque::new();
+	c_candidates.push_back(CandidatePendingAvailability {
+		core: CoreIndex::from(2),
+		hash: candidate_c_1.hash(),
+		descriptor: candidate_c_1.descriptor.clone(),
+		availability_votes: default_availability_votes(),
+		relay_parent_number: 0,
+		backed_in_number: 0,
+		backers: backing_bitfield(&[1]),
+		backing_group: GroupIndex::from(2),
+		commitments: candidate_c_1.commitments.clone(),
+	});
+	c_candidates.push_back(CandidatePendingAvailability {
+		core: CoreIndex::from(3),
+		hash: candidate_c_2.hash(),
+		descriptor: candidate_c_2.descriptor.clone(),
+		availability_votes: default_availability_votes(),
+		relay_parent_number: 0,
+		backed_in_number: 0,
+		backers: backing_bitfield(&[5]),
+		backing_group: GroupIndex::from(3),
+		commitments: candidate_c_2.commitments.clone(),
+	});
+	c_candidates.push_back(CandidatePendingAvailability {
+		core: CoreIndex::from(4),
+		hash: candidate_c_3.hash(),
+		descriptor: candidate_c_3.descriptor.clone(),
+		availability_votes: default_availability_votes(),
+		relay_parent_number: 0,
+		backed_in_number: 0,
+		backers: backing_bitfield(&[6]),
+		backing_group: GroupIndex::from(4),
+		commitments: candidate_c_3.commitments.clone(),
+	});
 
-	// this bitfield signals that only a is available.
-	let a_available = {
+	>::insert(chain_c, c_candidates);
+
+	// this bitfield signals that all candidates (a, b, c1, c2 and c3) are available.
+	let all_available = {
 		let mut bare_bitfield = default_bitfield();
-		*bare_bitfield.0.get_mut(0).unwrap() = true;
+		for bit in 0..=4 {
+			*bare_bitfield.0.get_mut(bit).unwrap() = true;
+		}
 
 		bare_bitfield
 	};
 
 	let threshold = availability_threshold(validators.len());
 
-	// 4 of 5 first value >= 2/3
-	assert_eq!(threshold, 4);
+	// 5 of 7 is the first value >= 2/3
+	assert_eq!(threshold, 5);
 
 	let signed_bitfields = validators
 		.iter()
 		.enumerate()
 		.filter_map(|(i, key)| {
-			let to_sign = if i < 3 {
-				a_and_b_available.clone()
-			} else if i < 4 {
-				a_available.clone()
+			let to_sign = if i < 4 {
+				all_available.clone()
+			} else if i < 5 {
+				// this bitfield signals that only a, c1 and c3 are available.
+				let mut bare_bitfield = default_bitfield();
+				*bare_bitfield.0.get_mut(0).unwrap() = true;
+				*bare_bitfield.0.get_mut(2).unwrap() = true;
+				*bare_bitfield.0.get_mut(4).unwrap() = true;
+
+				bare_bitfield
 			} else {
 				// sign nothing.
 				return None
@@ -814,46 +1032,129 @@ fn supermajority_bitfields_trigger_availability() {
 	);
 	assert_eq!(checked_bitfields.len(), old_len, "No bitfields should have been filtered!");
 
-	// only chain A's core is freed.
-	let v = process_bitfields(expected_bits(), checked_bitfields, core_lookup);
-	assert_eq!(vec![(CoreIndex(0), candidate_a.hash())], v);
-
-	// chain A had 4 signing off, which is >= threshold.
-	// chain B has 3 signing off, which is < threshold.
-	assert!(>::get(&chain_a).is_none());
-	assert!(>::get(&chain_a).is_none());
-	assert!(>::get(&chain_b).is_some());
-	assert_eq!(>::get(&chain_b).unwrap().availability_votes, {
-		// check that votes from first 3 were tracked.
+	// only chain A's core and candidate C1's core are freed.
+	let v = process_bitfields(checked_bitfields);
+	assert_eq!(
+		vec![(CoreIndex(2), candidate_c_1.hash()), (CoreIndex(0), candidate_a.hash())],
+		v
+	);
+
+	let votes = |bits: &[usize]| {
 		let mut votes = default_availability_votes();
-		*votes.get_mut(0).unwrap() = true;
-		*votes.get_mut(1).unwrap() = true;
-		*votes.get_mut(2).unwrap() = true;
+		for bit in bits {
+			*votes.get_mut(*bit).unwrap() = true;
+		}
 		votes
-	});
+	};
 
-	// and check that chain head was enacted.
+	assert!(>::get(&chain_a).unwrap().is_empty());
+	assert_eq!(
+		>::get(&chain_b)
+			.unwrap()
+			.pop_front()
+			.unwrap()
+			.availability_votes,
+		votes(&[0, 1, 2, 3])
+	);
+	let mut pending_c = >::get(&chain_c).unwrap();
+	assert_eq!(pending_c.pop_front().unwrap().availability_votes, votes(&[0, 1, 2, 3]));
+	assert_eq!(pending_c.pop_front().unwrap().availability_votes, votes(&[0, 1, 2, 3, 4]));
+	assert!(pending_c.is_empty());
+
+	// and check that the chain heads were enacted as expected.
 	assert_eq!(Paras::para_head(&chain_a), Some(vec![1, 2, 3, 4].into()));
+	assert_ne!(Paras::para_head(&chain_b), Some(vec![5, 6, 7, 8].into()));
+	assert_eq!(Paras::para_head(&chain_c), Some(vec![7, 8].into()));
 
 	// Check that rewards are applied.
 	{
 		let rewards = crate::mock::availability_rewards();
 
-		assert_eq!(rewards.len(), 4);
-		assert_eq!(rewards.get(&ValidatorIndex(0)).unwrap(), &1);
-		assert_eq!(rewards.get(&ValidatorIndex(1)).unwrap(), &1);
-		assert_eq!(rewards.get(&ValidatorIndex(2)).unwrap(), &1);
+		assert_eq!(rewards.len(), 5);
+		assert_eq!(rewards.get(&ValidatorIndex(0)).unwrap(), &2);
+		assert_eq!(rewards.get(&ValidatorIndex(1)).unwrap(), &2);
+		assert_eq!(rewards.get(&ValidatorIndex(2)).unwrap(), &2);
+		assert_eq!(rewards.get(&ValidatorIndex(3)).unwrap(), &2);
+		assert_eq!(rewards.get(&ValidatorIndex(4)).unwrap(), &2);
+	}
+
+	{
+		let rewards = crate::mock::backing_rewards();
+
+		assert_eq!(rewards.len(), 3);
 		assert_eq!(rewards.get(&ValidatorIndex(3)).unwrap(), &1);
+		assert_eq!(rewards.get(&ValidatorIndex(4)).unwrap(), &1);
+		assert_eq!(rewards.get(&ValidatorIndex(1)).unwrap(), &1);
+	}
+
+	// Add a new bitfield which will also make candidate C2 available. This will evict and
+	// enact C3 as well.
+	let signed_bitfields = vec![sign_bitfield(
+		&keystore,
+		&validators[5],
+		ValidatorIndex(5),
+		{
+			let mut bare_bitfield = default_bitfield();
+			*bare_bitfield.0.get_mut(3).unwrap() = true;
+			bare_bitfield
+		},
+		&signing_context,
+	)
+	.into()];
+
+	let old_len = signed_bitfields.len();
+	let checked_bitfields = simple_sanitize_bitfields(
+		signed_bitfields,
+		DisputedBitfield::zeros(expected_bits()),
+		expected_bits(),
+	);
+	assert_eq!(checked_bitfields.len(), old_len, "No bitfields should have been filtered!");
+
+	let v = process_bitfields(checked_bitfields);
+	assert_eq!(
+		vec![(CoreIndex(3), candidate_c_2.hash()), (CoreIndex(4), candidate_c_3.hash())],
+		v
+	);
+
+	assert!(>::get(&chain_a).unwrap().is_empty());
+	assert_eq!(
+		>::get(&chain_b)
+			.unwrap()
+			.pop_front()
+			.unwrap()
+			.availability_votes,
+		votes(&[0, 1, 2, 3])
+	);
+	assert!(>::get(&chain_c).unwrap().is_empty());
+
+	// and check that the chain heads were enacted as expected.
+	assert_eq!(Paras::para_head(&chain_a), Some(vec![1, 2, 3, 4].into()));
+	assert_ne!(Paras::para_head(&chain_b), Some(vec![5, 6, 7, 8].into()));
+	assert_eq!(Paras::para_head(&chain_c), Some(vec![11, 12].into()));
 
+	// Check that rewards are applied.
+ { + let rewards = crate::mock::availability_rewards(); + + assert_eq!(rewards.len(), 6); + assert_eq!(rewards.get(&ValidatorIndex(0)).unwrap(), &4); + assert_eq!(rewards.get(&ValidatorIndex(1)).unwrap(), &4); + assert_eq!(rewards.get(&ValidatorIndex(2)).unwrap(), &4); + assert_eq!(rewards.get(&ValidatorIndex(3)).unwrap(), &4); + assert_eq!(rewards.get(&ValidatorIndex(4)).unwrap(), &3); + assert_eq!(rewards.get(&ValidatorIndex(5)).unwrap(), &1); } { let rewards = crate::mock::backing_rewards(); - assert_eq!(rewards.len(), 2); + assert_eq!(rewards.len(), 5); assert_eq!(rewards.get(&ValidatorIndex(3)).unwrap(), &1); assert_eq!(rewards.get(&ValidatorIndex(4)).unwrap(), &1); + assert_eq!(rewards.get(&ValidatorIndex(1)).unwrap(), &1); + assert_eq!(rewards.get(&ValidatorIndex(5)).unwrap(), &1); + assert_eq!(rewards.get(&ValidatorIndex(6)).unwrap(), &1); } }); } @@ -878,6 +1179,7 @@ fn candidate_checks() { Sr25519Keyring::Charlie, Sr25519Keyring::Dave, Sr25519Keyring::Ferdie, + Sr25519Keyring::One, ]; let keystore: KeystorePtr = Arc::new(LocalKeystore::in_memory()); for validator in validators.iter() { @@ -904,7 +1206,8 @@ fn candidate_checks() { group_index if group_index == GroupIndex::from(0) => Some(vec![0, 1]), group_index if group_index == GroupIndex::from(1) => Some(vec![2, 3]), group_index if group_index == GroupIndex::from(2) => Some(vec![4]), - _ => panic!("Group index out of bounds for 2 parachains and 1 parathread core"), + group_index if group_index == GroupIndex::from(3) => Some(vec![5]), + _ => panic!("Group index out of bounds"), } .map(|m| m.into_iter().map(ValidatorIndex).collect::>()) }; @@ -914,12 +1217,12 @@ fn candidate_checks() { vec![ValidatorIndex(0), ValidatorIndex(1)], vec![ValidatorIndex(2), ValidatorIndex(3)], vec![ValidatorIndex(4)], + vec![ValidatorIndex(5)], ]; Scheduler::set_validator_groups(validator_groups); let thread_collator: CollatorId = Sr25519Keyring::Two.public().into(); let chain_a_assignment = (chain_a, CoreIndex::from(0)); - let chain_b_assignment = (chain_b, CoreIndex::from(1)); let thread_a_assignment = (thread_a, CoreIndex::from(2)); @@ -929,14 +1232,14 @@ fn candidate_checks() { assert_eq!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![], + &BTreeMap::new(), &group_validators, false ), Ok(ProcessedCandidates::default()) ); - // candidates out of order. + // Check candidate ordering { let mut candidate_a = TestCandidateBuilder { para_id: chain_a, @@ -947,19 +1250,37 @@ fn candidate_checks() { ..Default::default() } .build(); - let mut candidate_b = TestCandidateBuilder { + let mut candidate_b_1 = TestCandidateBuilder { para_id: chain_b, relay_parent: System::parent_hash(), pov_hash: Hash::repeat_byte(2), persisted_validation_data_hash: make_vdata_hash(chain_b).unwrap(), hrmp_watermark: RELAY_PARENT_NUM, + head_data: HeadData(vec![1, 2, 3]), ..Default::default() } .build(); - collator_sign_candidate(Sr25519Keyring::One, &mut candidate_a); + // Make candidate b2 a child of b1. 
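+			// Its persisted validation data commits to b1's output head data as the
+			// parent head data.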
+ let mut candidate_b_2 = TestCandidateBuilder { + para_id: chain_b, + relay_parent: System::parent_hash(), + pov_hash: Hash::repeat_byte(3), + persisted_validation_data_hash: make_persisted_validation_data_with_parent::( + RELAY_PARENT_NUM, + Default::default(), + candidate_b_1.commitments.head_data.clone(), + ) + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + head_data: HeadData(vec![5, 6, 7]), + ..Default::default() + } + .build(); - collator_sign_candidate(Sr25519Keyring::Two, &mut candidate_b); + collator_sign_candidate(Sr25519Keyring::One, &mut candidate_a); + collator_sign_candidate(Sr25519Keyring::Two, &mut candidate_b_1); + collator_sign_candidate(Sr25519Keyring::Two, &mut candidate_b_2); let backed_a = back_candidate( candidate_a, @@ -971,8 +1292,18 @@ fn candidate_checks() { None, ); - let backed_b = back_candidate( - candidate_b, + let backed_b_1 = back_candidate( + candidate_b_1.clone(), + &validators, + group_validators(GroupIndex::from(2)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + None, + ); + + let backed_b_2 = back_candidate( + candidate_b_2, &validators, group_validators(GroupIndex::from(1)).unwrap().as_ref(), &keystore, @@ -981,15 +1312,82 @@ fn candidate_checks() { None, ); - // out-of-order manifests as unscheduled. + // candidates are required to be sorted in dependency order. assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed_b, chain_b_assignment.1), (backed_a, chain_a_assignment.1)], + &vec![( + chain_b, + vec![ + (backed_b_2.clone(), CoreIndex(1)), + (backed_b_1.clone(), CoreIndex(2)) + ] + ),] + .into_iter() + .collect(), &group_validators, false ), - Error::::ScheduledOutOfOrder + Error::::ValidationDataHashMismatch + ); + + // candidates are no longer required to be sorted by core index. + ParaInclusion::process_candidates( + &allowed_relay_parents, + &vec![ + ( + chain_b, + vec![ + (backed_b_1.clone(), CoreIndex(2)), + (backed_b_2.clone(), CoreIndex(1)), + ], + ), + (chain_a_assignment.0, vec![(backed_a.clone(), chain_a_assignment.1)]), + ] + .into_iter() + .collect(), + &group_validators, + false, + ) + .unwrap(); + + // candidate does not build on top of the latest unincluded head + + let mut candidate_b_3 = TestCandidateBuilder { + para_id: chain_b, + relay_parent: System::parent_hash(), + pov_hash: Hash::repeat_byte(4), + persisted_validation_data_hash: make_persisted_validation_data_with_parent::( + RELAY_PARENT_NUM, + Default::default(), + candidate_b_1.commitments.head_data.clone(), + ) + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + head_data: HeadData(vec![8, 9]), + ..Default::default() + } + .build(); + collator_sign_candidate(Sr25519Keyring::Two, &mut candidate_b_3); + + let backed_b_3 = back_candidate( + candidate_b_3, + &validators, + group_validators(GroupIndex::from(3)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + None, + ); + + assert_noop!( + ParaInclusion::process_candidates( + &allowed_relay_parents, + &vec![(chain_b, vec![(backed_b_3, CoreIndex(3))])].into_iter().collect(), + &group_validators, + false + ), + Error::::ValidationDataHashMismatch ); } @@ -1006,8 +1404,9 @@ fn candidate_checks() { .build(); collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + // Insufficient backing. 
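+			// The candidate is backed with fewer validity votes than the required minimum.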
let backed = back_candidate( - candidate, + candidate.clone(), &validators, group_validators(GroupIndex::from(0)).unwrap().as_ref(), &keystore, @@ -1019,12 +1418,37 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed, chain_a_assignment.1)], + &vec![(chain_a_assignment.0, vec![(backed, chain_a_assignment.1)])] + .into_iter() + .collect(), &group_validators, false ), Error::::InsufficientBacking ); + + // Wrong backing group. + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(1)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + None, + ); + + assert_noop!( + ParaInclusion::process_candidates( + &allowed_relay_parents, + &vec![(chain_a_assignment.0, vec![(backed, chain_a_assignment.1)])] + .into_iter() + .collect(), + &group_validators, + false + ), + Error::::InvalidBacking + ); } // one of candidates is not based on allowed relay parent. @@ -1078,7 +1502,12 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed_b, chain_b_assignment.1), (backed_a, chain_a_assignment.1)], + &vec![ + (chain_b_assignment.0, vec![(backed_b, chain_b_assignment.1)]), + (chain_a_assignment.0, vec![(backed_a, chain_a_assignment.1)]) + ] + .into_iter() + .collect(), &group_validators, false ), @@ -1117,7 +1546,9 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed, thread_a_assignment.1)], + &vec![(thread_a_assignment.0, vec![(backed, thread_a_assignment.1)])] + .into_iter() + .collect(), &group_validators, false ), @@ -1125,100 +1556,6 @@ fn candidate_checks() { ); } - // para occupied - reject. - { - let mut candidate = TestCandidateBuilder { - para_id: chain_a, - relay_parent: System::parent_hash(), - pov_hash: Hash::repeat_byte(1), - persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), - hrmp_watermark: RELAY_PARENT_NUM, - ..Default::default() - } - .build(); - - collator_sign_candidate(Sr25519Keyring::One, &mut candidate); - - let backed = back_candidate( - candidate, - &validators, - group_validators(GroupIndex::from(0)).unwrap().as_ref(), - &keystore, - &signing_context, - BackingKind::Threshold, - None, - ); - - let candidate = TestCandidateBuilder::default().build(); - >::insert( - &chain_a, - CandidatePendingAvailability { - core: CoreIndex::from(0), - hash: candidate.hash(), - descriptor: candidate.descriptor, - availability_votes: default_availability_votes(), - relay_parent_number: 3, - backed_in_number: 4, - backers: default_backing_bitfield(), - backing_group: GroupIndex::from(0), - }, - ); - >::insert(&chain_a, candidate.commitments); - - assert_noop!( - ParaInclusion::process_candidates( - &allowed_relay_parents, - vec![(backed, chain_a_assignment.1)], - &group_validators, - false - ), - Error::::CandidateScheduledBeforeParaFree - ); - - >::remove(&chain_a); - >::remove(&chain_a); - } - - // messed up commitments storage - do not panic - reject. 
- { - let mut candidate = TestCandidateBuilder { - para_id: chain_a, - relay_parent: System::parent_hash(), - pov_hash: Hash::repeat_byte(1), - persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), - hrmp_watermark: RELAY_PARENT_NUM, - ..Default::default() - } - .build(); - - collator_sign_candidate(Sr25519Keyring::One, &mut candidate); - - // this is not supposed to happen - >::insert(&chain_a, candidate.commitments.clone()); - - let backed = back_candidate( - candidate, - &validators, - group_validators(GroupIndex::from(0)).unwrap().as_ref(), - &keystore, - &signing_context, - BackingKind::Threshold, - None, - ); - - assert_noop!( - ParaInclusion::process_candidates( - &allowed_relay_parents, - vec![(backed, chain_a_assignment.1)], - &group_validators, - false - ), - Error::::CandidateScheduledBeforeParaFree - ); - - >::remove(&chain_a); - } - // interfering code upgrade - reject { let mut candidate = TestCandidateBuilder { @@ -1260,7 +1597,9 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed, chain_a_assignment.1)], + &vec![(chain_a_assignment.0, vec![(backed, chain_a_assignment.1)])] + .into_iter() + .collect(), &group_validators, false ), @@ -1292,14 +1631,16 @@ fn candidate_checks() { None, ); - assert_eq!( + assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed, chain_a_assignment.1)], + &vec![(chain_a_assignment.0, vec![(backed, chain_a_assignment.1)])] + .into_iter() + .collect(), &group_validators, - false + false, ), - Err(Error::::ValidationDataHashMismatch.into()), + Error::::ValidationDataHashMismatch ); } @@ -1331,7 +1672,9 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed, chain_a_assignment.1)], + &vec![(chain_a_assignment.0, vec![(backed, chain_a_assignment.1)])] + .into_iter() + .collect(), &group_validators, false ), @@ -1367,7 +1710,9 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed, chain_a_assignment.1)], + &vec![(chain_a_assignment.0, vec![(backed, chain_a_assignment.1)])] + .into_iter() + .collect(), &group_validators, false ), @@ -1506,16 +1851,21 @@ fn backing_works() { ); let backed_candidates = vec![ - (backed_a.clone(), chain_a_assignment.1), - (backed_b.clone(), chain_b_assignment.1), - (backed_c, thread_a_assignment.1), - ]; + (chain_a_assignment.0, vec![(backed_a, chain_a_assignment.1)]), + (chain_b_assignment.0, vec![(backed_b, chain_b_assignment.1)]), + (thread_a_assignment.0, vec![(backed_c, thread_a_assignment.1)]), + ] + .into_iter() + .collect::>(); + let get_backing_group_idx = { // the order defines the group implicitly for this test case let backed_candidates_with_groups = backed_candidates - .iter() + .values() .enumerate() - .map(|(idx, (backed_candidate, _))| (backed_candidate.hash(), GroupIndex(idx as _))) + .map(|(idx, backed_candidates)| { + (backed_candidates.iter().next().unwrap().0.hash(), GroupIndex(idx as _)) + }) .collect::>(); move |candidate_hash_x: CandidateHash| -> Option { @@ -1534,7 +1884,7 @@ fn backing_works() { candidate_receipt_with_backing_validator_indices, } = ParaInclusion::process_candidates( &allowed_relay_parents, - backed_candidates.clone(), + &backed_candidates, &group_validators, false, ) @@ -1555,7 +1905,8 @@ fn backing_works() { CandidateHash, (CandidateReceipt, Vec<(ValidatorIndex, ValidityAttestation)>), >::new(); - backed_candidates.into_iter().for_each(|(backed_candidate, 
_)| { + backed_candidates.values().for_each(|backed_candidates| { + let backed_candidate = backed_candidates.iter().next().unwrap().0.clone(); let candidate_receipt_with_backers = intermediate .entry(backed_candidate.hash()) .or_insert_with(|| (backed_candidate.receipt(), Vec::new())); @@ -1606,20 +1957,21 @@ fn backing_works() { }; assert_eq!( >::get(&chain_a), - Some(CandidatePendingAvailability { - core: CoreIndex::from(0), - hash: candidate_a.hash(), - descriptor: candidate_a.descriptor, - availability_votes: default_availability_votes(), - relay_parent_number: System::block_number() - 1, - backed_in_number: System::block_number(), - backers, - backing_group: GroupIndex::from(0), - }) - ); - assert_eq!( - >::get(&chain_a), - Some(candidate_a.commitments), + Some( + [CandidatePendingAvailability { + core: CoreIndex::from(0), + hash: candidate_a.hash(), + descriptor: candidate_a.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers, + backing_group: GroupIndex::from(0), + commitments: candidate_a.commitments, + }] + .into_iter() + .collect::>() + ) ); let backers = { @@ -1631,38 +1983,40 @@ fn backing_works() { }; assert_eq!( >::get(&chain_b), - Some(CandidatePendingAvailability { - core: CoreIndex::from(1), - hash: candidate_b.hash(), - descriptor: candidate_b.descriptor, - availability_votes: default_availability_votes(), - relay_parent_number: System::block_number() - 1, - backed_in_number: System::block_number(), - backers, - backing_group: GroupIndex::from(1), - }) - ); - assert_eq!( - >::get(&chain_b), - Some(candidate_b.commitments), + Some( + [CandidatePendingAvailability { + core: CoreIndex::from(1), + hash: candidate_b.hash(), + descriptor: candidate_b.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers, + backing_group: GroupIndex::from(1), + commitments: candidate_b.commitments, + }] + .into_iter() + .collect::>() + ) ); assert_eq!( >::get(&thread_a), - Some(CandidatePendingAvailability { - core: CoreIndex::from(2), - hash: candidate_c.hash(), - descriptor: candidate_c.descriptor, - availability_votes: default_availability_votes(), - relay_parent_number: System::block_number() - 1, - backed_in_number: System::block_number(), - backers: backing_bitfield(&[4]), - backing_group: GroupIndex::from(2), - }) - ); - assert_eq!( - >::get(&thread_a), - Some(candidate_c.commitments), + Some( + [CandidatePendingAvailability { + core: CoreIndex::from(2), + hash: candidate_c.hash(), + descriptor: candidate_c.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers: backing_bitfield(&[4]), + backing_group: GroupIndex::from(2), + commitments: candidate_c.commitments + }] + .into_iter() + .collect::>() + ) ); }); } @@ -1750,11 +2104,17 @@ fn backing_works_with_elastic_scaling_mvp() { .build(); collator_sign_candidate(Sr25519Keyring::One, &mut candidate_b_1); + // Make candidate b2 a child of b1. 
let mut candidate_b_2 = TestCandidateBuilder { para_id: chain_b, relay_parent: System::parent_hash(), pov_hash: Hash::repeat_byte(3), - persisted_validation_data_hash: make_vdata_hash(chain_b).unwrap(), + persisted_validation_data_hash: make_persisted_validation_data_with_parent::( + RELAY_PARENT_NUM, + Default::default(), + candidate_b_1.commitments.head_data.clone(), + ) + .hash(), hrmp_watermark: RELAY_PARENT_NUM, ..Default::default() } @@ -1791,18 +2151,25 @@ fn backing_works_with_elastic_scaling_mvp() { Some(CoreIndex(2)), ); - let backed_candidates = vec![ - (backed_a.clone(), CoreIndex(0)), - (backed_b_1.clone(), CoreIndex(1)), - (backed_b_2.clone(), CoreIndex(2)), - ]; + let mut backed_candidates = BTreeMap::new(); + backed_candidates.insert(chain_a, vec![(backed_a, CoreIndex(0))]); + backed_candidates + .insert(chain_b, vec![(backed_b_1, CoreIndex(1)), (backed_b_2, CoreIndex(2))]); + let get_backing_group_idx = { // the order defines the group implicitly for this test case let backed_candidates_with_groups = backed_candidates - .iter() + .values() .enumerate() - .map(|(idx, (backed_candidate, _))| (backed_candidate.hash(), GroupIndex(idx as _))) - .collect::>(); + .map(|(idx, backed_candidates)| { + backed_candidates + .iter() + .enumerate() + .map(|(i, c)| (c.0.hash(), GroupIndex((idx + i) as _))) + .collect() + }) + .collect::>>() + .concat(); move |candidate_hash_x: CandidateHash| -> Option { backed_candidates_with_groups.iter().find_map(|(candidate_hash, grp)| { @@ -1820,14 +2187,13 @@ fn backing_works_with_elastic_scaling_mvp() { candidate_receipt_with_backing_validator_indices, } = ParaInclusion::process_candidates( &allowed_relay_parents, - backed_candidates.clone(), + &backed_candidates, &group_validators, true, ) .expect("candidates scheduled, in order, and backed"); - // Both b candidates will be backed. However, only one will be recorded on-chain and proceed - // with being made available. + // Both b candidates will be backed. 
assert_eq!( occupied_cores, vec![ @@ -1842,26 +2208,29 @@ fn backing_works_with_elastic_scaling_mvp() { CandidateHash, (CandidateReceipt, Vec<(ValidatorIndex, ValidityAttestation)>), >::new(); - backed_candidates.into_iter().for_each(|(backed_candidate, _)| { - let candidate_receipt_with_backers = expected - .entry(backed_candidate.hash()) - .or_insert_with(|| (backed_candidate.receipt(), Vec::new())); - let (validator_indices, _maybe_core_index) = - backed_candidate.validator_indices_and_core_index(true); - assert_eq!(backed_candidate.validity_votes().len(), validator_indices.count_ones()); - candidate_receipt_with_backers.1.extend( - validator_indices - .iter() - .enumerate() - .filter(|(_, signed)| **signed) - .zip(backed_candidate.validity_votes().iter().cloned()) - .filter_map(|((validator_index_within_group, _), attestation)| { - let grp_idx = get_backing_group_idx(backed_candidate.hash()).unwrap(); - group_validators(grp_idx).map(|validator_indices| { - (validator_indices[validator_index_within_group], attestation) - }) - }), - ); + backed_candidates.values().for_each(|backed_candidates| { + for backed_candidate in backed_candidates { + let backed_candidate = backed_candidate.0.clone(); + let candidate_receipt_with_backers = expected + .entry(backed_candidate.hash()) + .or_insert_with(|| (backed_candidate.receipt(), Vec::new())); + let (validator_indices, _maybe_core_index) = + backed_candidate.validator_indices_and_core_index(true); + assert_eq!(backed_candidate.validity_votes().len(), validator_indices.count_ones()); + candidate_receipt_with_backers.1.extend( + validator_indices + .iter() + .enumerate() + .filter(|(_, signed)| **signed) + .zip(backed_candidate.validity_votes().iter().cloned()) + .filter_map(|((validator_index_within_group, _), attestation)| { + let grp_idx = get_backing_group_idx(backed_candidate.hash()).unwrap(); + group_validators(grp_idx).map(|validator_indices| { + (validator_indices[validator_index_within_group], attestation) + }) + }), + ); + } }); assert_eq!( @@ -1881,39 +2250,54 @@ fn backing_works_with_elastic_scaling_mvp() { }; assert_eq!( >::get(&chain_a), - Some(CandidatePendingAvailability { - core: CoreIndex::from(0), - hash: candidate_a.hash(), - descriptor: candidate_a.descriptor, - availability_votes: default_availability_votes(), - relay_parent_number: System::block_number() - 1, - backed_in_number: System::block_number(), - backers, - backing_group: GroupIndex::from(0), - }) - ); - assert_eq!( - >::get(&chain_a), - Some(candidate_a.commitments), + Some( + [CandidatePendingAvailability { + core: CoreIndex::from(0), + hash: candidate_a.hash(), + descriptor: candidate_a.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers, + backing_group: GroupIndex::from(0), + commitments: candidate_a.commitments + }] + .into_iter() + .collect::>() + ) ); - // Only one candidate for b will be recorded on chain. + // Both candidates of b will be recorded on chain. 
assert_eq!( >::get(&chain_b), - Some(CandidatePendingAvailability { - core: CoreIndex::from(2), - hash: candidate_b_2.hash(), - descriptor: candidate_b_2.descriptor, - availability_votes: default_availability_votes(), - relay_parent_number: System::block_number() - 1, - backed_in_number: System::block_number(), - backers: backing_bitfield(&[4]), - backing_group: GroupIndex::from(2), - }) - ); - assert_eq!( - >::get(&chain_b), - Some(candidate_b_2.commitments), + Some( + [ + CandidatePendingAvailability { + core: CoreIndex::from(1), + hash: candidate_b_1.hash(), + descriptor: candidate_b_1.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers: backing_bitfield(&[2, 3]), + backing_group: GroupIndex::from(1), + commitments: candidate_b_1.commitments + }, + CandidatePendingAvailability { + core: CoreIndex::from(2), + hash: candidate_b_2.hash(), + descriptor: candidate_b_2.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers: backing_bitfield(&[4]), + backing_group: GroupIndex::from(2), + commitments: candidate_b_2.commitments + } + ] + .into_iter() + .collect::>() + ) ); }); } @@ -1998,8 +2382,10 @@ fn can_include_candidate_with_ok_code_upgrade() { let ProcessedCandidates { core_indices: occupied_cores, .. } = ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed_a, chain_a_assignment.1)], - &group_validators, + &vec![(chain_a_assignment.0, vec![(backed_a, chain_a_assignment.1)])] + .into_iter() + .collect::>(), + group_validators, false, ) .expect("candidates scheduled, in order, and backed"); @@ -2015,20 +2401,21 @@ fn can_include_candidate_with_ok_code_upgrade() { }; assert_eq!( >::get(&chain_a), - Some(CandidatePendingAvailability { - core: CoreIndex::from(0), - hash: candidate_a.hash(), - descriptor: candidate_a.descriptor, - availability_votes: default_availability_votes(), - relay_parent_number: System::block_number() - 1, - backed_in_number: System::block_number(), - backers, - backing_group: GroupIndex::from(0), - }) - ); - assert_eq!( - >::get(&chain_a), - Some(candidate_a.commitments), + Some( + [CandidatePendingAvailability { + core: CoreIndex::from(0), + hash: candidate_a.hash(), + descriptor: candidate_a.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers, + backing_group: GroupIndex::from(0), + commitments: candidate_a.commitments + }] + .into_iter() + .collect::>() + ) ); }); } @@ -2209,14 +2596,16 @@ fn check_allowed_relay_parents() { ); let backed_candidates = vec![ - (backed_a, chain_a_assignment.1), - (backed_b, chain_b_assignment.1), - (backed_c, thread_a_assignment.1), - ]; + (chain_a_assignment.0, vec![(backed_a, chain_a_assignment.1)]), + (chain_b_assignment.0, vec![(backed_b, chain_b_assignment.1)]), + (thread_a_assignment.0, vec![(backed_c, thread_a_assignment.1)]), + ] + .into_iter() + .collect::>(); ParaInclusion::process_candidates( &allowed_relay_parents, - backed_candidates.clone(), + &backed_candidates, &group_validators, false, ) @@ -2264,25 +2653,10 @@ fn session_change_wipes() { run_to_block(10, |_| None); - >::insert( - &ValidatorIndex(0), - AvailabilityBitfieldRecord { bitfield: default_bitfield(), submitted_at: 9 }, - ); - - >::insert( - &ValidatorIndex(1), - AvailabilityBitfieldRecord { 
bitfield: default_bitfield(), submitted_at: 9 }, - ); - - >::insert( - &ValidatorIndex(4), - AvailabilityBitfieldRecord { bitfield: default_bitfield(), submitted_at: 9 }, - ); - let candidate = TestCandidateBuilder::default().build(); >::insert( &chain_a, - CandidatePendingAvailability { + [CandidatePendingAvailability { core: CoreIndex::from(0), hash: candidate.hash(), descriptor: candidate.descriptor.clone(), @@ -2291,13 +2665,15 @@ fn session_change_wipes() { backed_in_number: 6, backers: default_backing_bitfield(), backing_group: GroupIndex::from(0), - }, + commitments: candidate.commitments.clone(), + }] + .into_iter() + .collect::>(), ); - >::insert(&chain_a, candidate.commitments.clone()); >::insert( &chain_b, - CandidatePendingAvailability { + [CandidatePendingAvailability { core: CoreIndex::from(1), hash: candidate.hash(), descriptor: candidate.descriptor, @@ -2306,22 +2682,18 @@ fn session_change_wipes() { backed_in_number: 7, backers: default_backing_bitfield(), backing_group: GroupIndex::from(1), - }, + commitments: candidate.commitments, + }] + .into_iter() + .collect::>(), ); - >::insert(&chain_b, candidate.commitments); run_to_block(11, |_| None); assert_eq!(shared::Pallet::::session_index(), 5); - assert!(>::get(&ValidatorIndex(0)).is_some()); - assert!(>::get(&ValidatorIndex(1)).is_some()); - assert!(>::get(&ValidatorIndex(4)).is_some()); - assert!(>::get(&chain_a).is_some()); assert!(>::get(&chain_b).is_some()); - assert!(>::get(&chain_a).is_some()); - assert!(>::get(&chain_b).is_some()); run_to_block(12, |n| match n { 12 => Some(SessionChangeNotification { @@ -2337,18 +2709,7 @@ fn session_change_wipes() { assert_eq!(shared::Pallet::::session_index(), 6); - assert!(>::get(&ValidatorIndex(0)).is_none()); - assert!(>::get(&ValidatorIndex(1)).is_none()); - assert!(>::get(&ValidatorIndex(4)).is_none()); - - assert!(>::get(&chain_a).is_none()); - assert!(>::get(&chain_b).is_none()); - assert!(>::get(&chain_a).is_none()); - assert!(>::get(&chain_b).is_none()); - - assert!(>::iter().collect::>().is_empty()); assert!(>::iter().collect::>().is_empty()); - assert!(>::iter().collect::>().is_empty()); }); } @@ -2420,11 +2781,6 @@ fn para_upgrade_delay_scheduled_from_inclusion() { ]]; Scheduler::set_validator_groups(validator_groups); - let core_lookup = |core| match core { - core if core == CoreIndex::from(0) => Some(chain_a), - _ => None, - }; - let allowed_relay_parents = default_allowed_relay_parent_tracker(); let chain_a_assignment = (chain_a, CoreIndex::from(0)); @@ -2453,7 +2809,9 @@ fn para_upgrade_delay_scheduled_from_inclusion() { let ProcessedCandidates { core_indices: occupied_cores, .. 
} = ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed_a, chain_a_assignment.1)], + &vec![(chain_a_assignment.0, vec![(backed_a, chain_a_assignment.1)])] + .into_iter() + .collect::>(), &group_validators, false, ) @@ -2488,11 +2846,10 @@ fn para_upgrade_delay_scheduled_from_inclusion() { expected_bits(), ); - let v = process_bitfields(expected_bits(), checked_bitfields, core_lookup); + let v = process_bitfields(checked_bitfields); assert_eq!(vec![(CoreIndex(0), candidate_a.hash())], v); - assert!(>::get(&chain_a).is_none()); - assert!(>::get(&chain_a).is_none()); + assert!(>::get(&chain_a).unwrap().is_empty()); let active_vote_state = paras::Pallet::::active_vote_state(&new_validation_code_hash) .expect("prechecking must be initiated"); diff --git a/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs b/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs index f3365945758..1b07acffb15 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs @@ -145,10 +145,6 @@ benchmarks! { assert_eq!(backing_validators.1.len(), votes); } - assert_eq!( - inclusion::PendingAvailabilityCommitments::::iter().count(), - cores_with_backed.len() - ); assert_eq!( inclusion::PendingAvailability::::iter().count(), cores_with_backed.len() @@ -209,10 +205,6 @@ benchmarks! { ); } - assert_eq!( - inclusion::PendingAvailabilityCommitments::::iter().count(), - cores_with_backed.len() - ); assert_eq!( inclusion::PendingAvailability::::iter().count(), cores_with_backed.len() diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 723a15bdba7..02ddfd0acca 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -24,8 +24,7 @@ use crate::{ configuration, disputes::DisputesHandler, - inclusion, - inclusion::CandidateCheckContext, + inclusion::{self, CandidateCheckContext}, initializer, metrics::METRICS, paras, @@ -35,6 +34,7 @@ use crate::{ }; use bitvec::prelude::BitVec; use frame_support::{ + defensive, dispatch::{DispatchErrorWithPostInfo, PostDispatchInfo}, inherent::{InherentData, InherentIdentifier, MakeFatalError, ProvideInherent}, pallet_prelude::*, @@ -45,7 +45,7 @@ use pallet_babe::{self, ParentBlockRandomness}; use primitives::{ effective_minimum_backing_votes, vstaging::node_features::FeatureIndex, BackedCandidate, CandidateHash, CandidateReceipt, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, - CoreIndex, DisputeStatementSet, InherentData as ParachainsInherentData, + CoreIndex, DisputeStatementSet, HeadData, InherentData as ParachainsInherentData, MultiDisputeStatementSet, ScrapedOnChainVotes, SessionIndex, SignedAvailabilityBitfields, SigningContext, UncheckedSignedAvailabilityBitfield, UncheckedSignedAvailabilityBitfields, ValidatorId, ValidatorIndex, ValidityAttestation, PARACHAINS_INHERENT_IDENTIFIER, @@ -134,18 +134,11 @@ pub mod pallet { /// The hash of the submitted parent header doesn't correspond to the saved block hash of /// the parent. InvalidParentHeader, - /// Disputed candidate that was concluded invalid. - CandidateConcludedInvalid, /// The data given to the inherent will result in an overweight block. InherentOverweight, - /// The ordering of dispute statements was invalid. - DisputeStatementsUnsortedOrDuplicates, - /// A dispute statement was invalid. 
- DisputeInvalid, - /// A candidate was backed by a disabled validator - BackedByDisabled, - /// A candidate was backed even though the paraid was not scheduled. - BackedOnUnscheduledCore, + /// A candidate was filtered during inherent execution. This should have only been done + /// during creation. + CandidatesFilteredDuringExecution, /// Too many candidates supplied. UnscheduledCandidate, } @@ -235,35 +228,6 @@ pub mod pallet { } } - /// Collect all freed cores based on storage data. (i.e. append cores freed from timeouts to - /// the given `freed_concluded`). - /// - /// The parameter `freed_concluded` contains all core indicies that became - /// free due to candidate that became available. - pub(crate) fn collect_all_freed_cores( - freed_concluded: I, - ) -> BTreeMap - where - I: core::iter::IntoIterator, - T: Config, - { - // Handle timeouts for any availability core work. - let freed_timeout = if >::availability_timeout_check_required() { - let pred = >::availability_timeout_predicate(); - >::collect_pending(pred) - } else { - Vec::new() - }; - - // Schedule paras again, given freed cores, and reasons for freeing. - let freed = freed_concluded - .into_iter() - .map(|(c, _hash)| (c, FreedReason::Concluded)) - .chain(freed_timeout.into_iter().map(|c| (c, FreedReason::TimedOut))) - .collect::>(); - freed - } - #[pallet::call] impl Pallet { /// Enter the paras inherent. This will process bitfields and backed candidates. @@ -319,7 +283,7 @@ impl Pallet { /// Process inherent data. /// /// The given inherent data is processed and state is altered accordingly. If any data could - /// not be applied (inconsitencies, weight limit, ...) it is removed. + /// not be applied (inconsistencies, weight limit, ...) it is removed. /// /// When called from `create_inherent` the `context` must be set to /// `ProcessInherentDataContext::ProvideInherent` so it guarantees the invariant that inherent @@ -526,7 +490,7 @@ impl Pallet { // Contains the disputes that are concluded in the current session only, // since these are the only ones that are relevant for the occupied cores - // and lightens the load on `collect_disputed` significantly. + // and lightens the load on `free_disputed` significantly. // Cores can't be occupied with candidates of the previous sessions, and only // things with new votes can have just concluded. We only need to collect // cores with disputes that conclude just now, because disputes that @@ -542,21 +506,17 @@ impl Pallet { .map(|(_session, candidate)| candidate) .collect::>(); - let freed_disputed: BTreeMap = - >::collect_disputed(¤t_concluded_invalid_disputes) + // Get the cores freed as a result of concluded invalid candidates. + let (freed_disputed, concluded_invalid_hashes): (Vec, BTreeSet) = + >::free_disputed(¤t_concluded_invalid_disputes) .into_iter() - .map(|core| (core, FreedReason::Concluded)) - .collect(); + .unzip(); // Create a bit index from the set of core indices where each index corresponds to // a core index that was freed due to a dispute. // // I.e. 010100 would indicate, the candidates on Core 1 and 3 would be disputed. 
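		// (Bitfield position `i` corresponds to core `i`: reading left to right from
		// index 0, `010100` has bits 1 and 3 set, i.e. the candidates occupying cores
		// 1 and 3 were freed by disputes that concluded invalid.)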
- let disputed_bitfield = create_disputed_bitfield(expected_bits, freed_disputed.keys()); - - if !freed_disputed.is_empty() { - >::free_cores_and_fill_claimqueue(freed_disputed.clone(), now); - } + let disputed_bitfield = create_disputed_bitfield(expected_bits, freed_disputed.iter()); let bitfields = sanitize_bitfields::( bitfields, @@ -571,11 +531,9 @@ impl Pallet { // Process new availability bitfields, yielding any availability cores whose // work has now concluded. let freed_concluded = - >::update_pending_availability_and_get_freed_cores::<_>( - expected_bits, + >::update_pending_availability_and_get_freed_cores( &validator_public[..], bitfields.clone(), - >::core_para, ); // Inform the disputes module of all included candidates. @@ -585,8 +543,24 @@ impl Pallet { METRICS.on_candidates_included(freed_concluded.len() as u64); - let freed = collect_all_freed_cores::(freed_concluded.iter().cloned()); + // Get the timed out candidates + let freed_timeout = if >::availability_timeout_check_required() { + >::free_timedout() + } else { + Vec::new() + }; + + if !freed_timeout.is_empty() { + log::debug!(target: LOG_TARGET, "Evicted timed out cores: {:?}", freed_timeout); + } + // We'll schedule paras again, given freed cores, and reasons for freeing. + let freed = freed_concluded + .into_iter() + .map(|(c, _hash)| (c, FreedReason::Concluded)) + .chain(freed_disputed.into_iter().map(|core| (core, FreedReason::Concluded))) + .chain(freed_timeout.into_iter().map(|c| (c, FreedReason::TimedOut))) + .collect::>(); >::free_cores_and_fill_claimqueue(freed, now); METRICS.on_candidates_processed_total(backed_candidates.len() as u64); @@ -605,55 +579,28 @@ impl Pallet { scheduled.entry(para_id).or_default().insert(core_idx); } - let SanitizedBackedCandidates { - backed_candidates_with_core, - votes_from_disabled_were_dropped, - dropped_unscheduled_candidates, - } = sanitize_backed_candidates::( + let initial_candidate_count = backed_candidates.len(); + let backed_candidates_with_core = sanitize_backed_candidates::( backed_candidates, &allowed_relay_parents, - |candidate_idx: usize, - backed_candidate: &BackedCandidate<::Hash>| - -> bool { - let para_id = backed_candidate.descriptor().para_id; - let prev_context = >::para_most_recent_context(para_id); - let check_ctx = CandidateCheckContext::::new(prev_context); - - // never include a concluded-invalid candidate - current_concluded_invalid_disputes.contains(&backed_candidate.hash()) || - // Instead of checking the candidates with code upgrades twice - // move the checking up here and skip it in the training wheels fallback. - // That way we avoid possible duplicate checks while assuring all - // backed candidates fine to pass on. - // - // NOTE: this is the only place where we check the relay-parent. - check_ctx - .verify_backed_candidate(&allowed_relay_parents, candidate_idx, backed_candidate.candidate()) - .is_err() - }, + concluded_invalid_hashes, scheduled, core_index_enabled, ); + let count = count_backed_candidates(&backed_candidates_with_core); - ensure!( - backed_candidates_with_core.len() <= total_scheduled_cores, - Error::::UnscheduledCandidate - ); - - METRICS.on_candidates_sanitized(backed_candidates_with_core.len() as u64); + ensure!(count <= total_scheduled_cores, Error::::UnscheduledCandidate); - // In `Enter` context (invoked during execution) there should be no backing votes from - // disabled validators because they should have been filtered out during inherent data - // preparation (`ProvideInherent` context). Abort in such cases. 
- if context == ProcessInherentDataContext::Enter { - ensure!(!votes_from_disabled_were_dropped, Error::::BackedByDisabled); - } + METRICS.on_candidates_sanitized(count as u64); - // In `Enter` context (invoked during execution) we shouldn't have filtered any candidates - // due to a para not being scheduled. They have been filtered during inherent data - // preparation (`ProvideInherent` context). Abort in such cases. + // In `Enter` context (invoked during execution) no more candidates should be filtered, + // because they have already been filtered during `ProvideInherent` context. Abort in such + // cases. if context == ProcessInherentDataContext::Enter { - ensure!(!dropped_unscheduled_candidates, Error::::BackedOnUnscheduledCore); + ensure!( + initial_candidate_count == count, + Error::::CandidatesFilteredDuringExecution + ); } // Process backed candidates according to scheduled cores. @@ -662,7 +609,7 @@ impl Pallet { candidate_receipt_with_backing_validator_indices, } = >::process_candidates( &allowed_relay_parents, - backed_candidates_with_core.clone(), + &backed_candidates_with_core, >::group_validators, core_index_enabled, )?; @@ -683,10 +630,13 @@ impl Pallet { let processed = ParachainsInherentData { bitfields, - backed_candidates: backed_candidates_with_core - .into_iter() - .map(|(candidate, _)| candidate) - .collect(), + backed_candidates: backed_candidates_with_core.into_iter().fold( + Vec::with_capacity(count), + |mut acc, (_id, candidates)| { + acc.extend(candidates.into_iter().map(|(c, _)| c)); + acc + }, + ), disputes, parent_header, }; @@ -986,83 +936,86 @@ pub(crate) fn sanitize_bitfields( bitfields } -// Result from `sanitize_backed_candidates` -#[derive(Debug, PartialEq)] -struct SanitizedBackedCandidates { - // Sanitized backed candidates along with the assigned core. The `Vec` is sorted according to - // the occupied core index. - backed_candidates_with_core: Vec<(BackedCandidate, CoreIndex)>, - // Set to true if any votes from disabled validators were dropped from the input. - votes_from_disabled_were_dropped: bool, - // Set to true if any candidates were dropped due to filtering done in - // `map_candidates_to_cores` - dropped_unscheduled_candidates: bool, -} - +/// Performs various filtering on the backed candidates inherent data. +/// Must maintain the invariant that the returned candidate collection contains the candidates +/// sorted in dependency order for each para. When doing any filtering, we must therefore drop any +/// subsequent candidates after the filtered one. +/// /// Filter out: -/// 1. any candidates that have a concluded invalid dispute -/// 2. any unscheduled candidates, as well as candidates whose paraid has multiple cores assigned +/// 1. any candidates which don't form a chain with the other candidates of the paraid (even if they +/// do form a chain but are not in the right order). +/// 2. any candidates that have a concluded invalid dispute or who are descendants of a concluded +/// invalid candidate. +/// 3. any unscheduled candidates, as well as candidates whose paraid has multiple cores assigned /// but have no injected core index. -/// 3. all backing votes from disabled validators -/// 4. any candidates that end up with less than `effective_minimum_backing_votes` backing votes +/// 4. all backing votes from disabled validators +/// 5. 
any candidates that end up with fewer than `effective_minimum_backing_votes` backing votes
+///
-/// `scheduled` follows the same naming scheme as provided in the
-/// guide: Currently `free` but might become `occupied`.
-/// For the filtering here the relevant part is only the current `free`
-/// state.
-///
-/// `candidate_has_concluded_invalid_dispute` must return `true` if the candidate
-/// is disputed, false otherwise. The passed `usize` is the candidate index.
-///
-/// Returns struct `SanitizedBackedCandidates` where `backed_candidates` are sorted according to the
-/// occupied core index.
-fn sanitize_backed_candidates<
-	T: crate::inclusion::Config,
-	F: FnMut(usize, &BackedCandidate<T::Hash>) -> bool,
->(
-	mut backed_candidates: Vec<BackedCandidate<T::Hash>>,
+/// Returns the scheduled backed candidates which passed filtering, mapped by para id and in the
+/// right dependency order.
+fn sanitize_backed_candidates<T: crate::inclusion::Config>(
+	backed_candidates: Vec<BackedCandidate<T::Hash>>,
 	allowed_relay_parents: &AllowedRelayParentsTracker<T::Hash, BlockNumberFor<T>>,
-	mut candidate_has_concluded_invalid_dispute_or_is_invalid: F,
+	concluded_invalid_with_descendants: BTreeSet<CandidateHash>,
 	scheduled: BTreeMap<ParaId, BTreeSet<CoreIndex>>,
 	core_index_enabled: bool,
-) -> SanitizedBackedCandidates {
-	// Remove any candidates that were concluded invalid.
-	// This does not assume sorting.
-	backed_candidates.indexed_retain(move |candidate_idx, backed_candidate| {
-		!candidate_has_concluded_invalid_dispute_or_is_invalid(candidate_idx, backed_candidate)
+) -> BTreeMap<ParaId, Vec<(BackedCandidate<T::Hash>, CoreIndex)>> {
+	// Map the candidates to the right paraids, while making sure that the order between candidates
+	// of the same para is preserved.
+	let mut candidates_per_para: BTreeMap<ParaId, Vec<_>> = BTreeMap::new();
+	for candidate in backed_candidates {
+		candidates_per_para
+			.entry(candidate.descriptor().para_id)
+			.or_default()
+			.push(candidate);
+	}
+
+	// Check that candidates pertaining to the same para form a chain. Drop the ones that
+	// don't, along with the rest of the candidates which follow them in the input vector.
+	filter_unchained_candidates::<T>(&mut candidates_per_para, allowed_relay_parents);
+
+	// Remove any candidates that were concluded invalid or that are descendants of concluded
+	// invalid candidates, together with everything that follows them in their para's chain.
+	retain_candidates::<T, _, _>(&mut candidates_per_para, |_, candidate| {
+		let keep = !concluded_invalid_with_descendants.contains(&candidate.candidate().hash());
+
+		if !keep {
+			log::debug!(
+				target: LOG_TARGET,
+				"Found backed candidate {:?} which was concluded invalid or is a descendant of a concluded invalid candidate, for paraid {:?}.",
+				candidate.candidate().hash(),
+				candidate.descriptor().para_id
+			);
+		}
+		keep
+	});
 
-	let initial_candidate_count = backed_candidates.len();
-	// Map candidates to scheduled cores. Filter out any unscheduled candidates.
+	// Map candidates to scheduled cores. Filter out any unscheduled candidates along with their
+	// descendants.
 	let mut backed_candidates_with_core = map_candidates_to_cores::<T>(
 		&allowed_relay_parents,
 		scheduled,
 		core_index_enabled,
-		backed_candidates,
+		candidates_per_para,
 	);
 
-	let dropped_unscheduled_candidates =
-		initial_candidate_count != backed_candidates_with_core.len();
-
-	// Filter out backing statements from disabled validators
-	let votes_from_disabled_were_dropped = filter_backed_statements_from_disabled_validators::<T>(
+	// Filter out backing statements from disabled validators. If that leaves a candidate with
+	// fewer backing votes than required, filter out that candidate too. As with all the other
+	// filtering operations above, we also drop the descendants of any dropped candidate.
+	filter_backed_statements_from_disabled_validators::<T>(
 		&mut backed_candidates_with_core,
 		&allowed_relay_parents,
 		core_index_enabled,
 	);
 
-	// Sort the `Vec` last, once there is a guarantee that these
-	// `BackedCandidates` references the expected relay chain parent,
-	// but more importantly are scheduled for a free core.
-	// This both avoids extra work for obviously invalid candidates,
-	// but also allows this to be done in place.
-	backed_candidates_with_core.sort_by(|(_x, core_x), (_y, core_y)| core_x.cmp(&core_y));
-
-	SanitizedBackedCandidates {
-		dropped_unscheduled_candidates,
-		votes_from_disabled_were_dropped,
-		backed_candidates_with_core,
-	}
+	backed_candidates_with_core
+}
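The grouping step above (flat candidate vector to per-para vectors, preserving each para's relative order) can be pinned down with a small, std-only sketch; the types here are illustrative stand-ins, not the runtime's:

use std::collections::BTreeMap;

// Illustrative stand-in for a backed candidate; only the fields needed here.
#[derive(Debug, PartialEq)]
struct Candidate {
	para_id: u32,
	seq: u8,
}

// Mirrors the `candidates_per_para` construction above: `entry(..).or_default().push(..)`
// preserves the input (dependency) order of each para's candidates.
fn group_by_para(input: Vec<Candidate>) -> BTreeMap<u32, Vec<Candidate>> {
	let mut out: BTreeMap<u32, Vec<Candidate>> = BTreeMap::new();
	for c in input {
		out.entry(c.para_id).or_default().push(c);
	}
	out
}

fn main() {
	let grouped = group_by_para(vec![
		Candidate { para_id: 2, seq: 0 },
		Candidate { para_id: 1, seq: 0 },
		Candidate { para_id: 2, seq: 1 },
	]);
	// Para 2 keeps its candidates in submission order: an assumed parent always
	// precedes its child, which is what the chain checks rely on.
	assert_eq!(grouped[&2].iter().map(|c| c.seq).collect::<Vec<_>>(), vec![0, 1]);
}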
+
+fn count_backed_candidates<H>(backed_candidates: &BTreeMap<ParaId, Vec<H>>) -> usize {
+	backed_candidates.iter().fold(0, |mut count, (_id, candidates)| {
+		count += candidates.len();
+		count
+	})
 }
 
 /// Derive entropy from babe provided per block randomness.
@@ -1146,48 +1099,82 @@ fn limit_and_sanitize_disputes<
 	}
 }
 
-// Filters statements from disabled validators in `BackedCandidate`, non-scheduled candidates and
-// few more sanity checks. Returns `true` if at least one statement is removed and `false`
-// otherwise.
-fn filter_backed_statements_from_disabled_validators<T: shared::Config + scheduler::Config + inclusion::Config>(
-	backed_candidates_with_core: &mut Vec<(
-		BackedCandidate<<T as frame_system::Config>::Hash>,
-		CoreIndex,
-	)>,
+// Helper function for filtering candidates which don't pass the given predicate. When/if the first
+// candidate which fails the predicate is found, all the other candidates that follow are dropped.
+fn retain_candidates<
+	T: inclusion::Config + paras::Config,
+	F: FnMut(ParaId, &mut C) -> bool,
+	C,
+>(
+	candidates_per_para: &mut BTreeMap<ParaId, Vec<C>>,
+	mut pred: F,
+) {
+	for (para_id, candidates) in candidates_per_para.iter_mut() {
+		let mut latest_valid_idx = None;
+
+		for (idx, candidate) in candidates.iter_mut().enumerate() {
+			if pred(*para_id, candidate) {
+				// Found a valid candidate.
+				latest_valid_idx = Some(idx);
+			} else {
+				break
+			}
+		}
+
+		if let Some(latest_valid_idx) = latest_valid_idx {
+			candidates.truncate(latest_valid_idx + 1);
+		} else {
+			candidates.clear();
+		}
+	}
+
+	candidates_per_para.retain(|_, c| !c.is_empty());
+}
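The prefix-truncation semantics of this helper can be exercised in isolation; here is a minimal, self-contained analogue (std-only, with a simplified, non-mutating predicate signature):

use std::collections::BTreeMap;

// Keep, per key, only the longest prefix of values satisfying `pred`; drop keys whose
// vectors end up empty. This mirrors the behaviour of `retain_candidates` above.
fn retain_prefix<K: Ord + Copy, V>(
	map: &mut BTreeMap<K, Vec<V>>,
	mut pred: impl FnMut(K, &V) -> bool,
) {
	for (k, vs) in map.iter_mut() {
		let keep = vs.iter().take_while(|v| pred(*k, *v)).count();
		vs.truncate(keep);
	}
	map.retain(|_, vs| !vs.is_empty());
}

fn main() {
	let mut m: BTreeMap<u32, Vec<u32>> = BTreeMap::new();
	m.insert(1, vec![2, 4, 5, 6]);
	m.insert(2, vec![1]);
	retain_prefix(&mut m, |_, v| v % 2 == 0);
	// 5 fails the predicate, so the trailing 6 is dropped along with it.
	assert_eq!(m.get(&1), Some(&vec![2, 4]));
	// A failing head clears (and removes) the whole entry.
	assert_eq!(m.get(&2), None);
	// (In the runtime, the predicate is stateful: each kept candidate advances the
	// expected parent head data for its para.)
}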
+
+// Filters statements from disabled validators in `BackedCandidate` and does a few more sanity
+// checks.
+fn filter_backed_statements_from_disabled_validators<
+	T: shared::Config + scheduler::Config + inclusion::Config,
+>(
+	backed_candidates_with_core: &mut BTreeMap<
+		ParaId,
+		Vec<(BackedCandidate<<T as frame_system::Config>::Hash>, CoreIndex)>,
+	>,
 	allowed_relay_parents: &AllowedRelayParentsTracker<T::Hash, BlockNumberFor<T>>,
 	core_index_enabled: bool,
-) -> bool {
+) {
 	let disabled_validators =
 		BTreeSet::<_>::from_iter(shared::Pallet::<T>::disabled_validators().into_iter());
 
 	if disabled_validators.is_empty() {
 		// No disabled validators - nothing to do
-		return false
+		return
 	}
 
-	let backed_len_before = backed_candidates_with_core.len();
-
-	// Flag which will be returned. Set to `true` if at least one vote is filtered.
-	let mut filtered = false;
-
 	let minimum_backing_votes = configuration::Pallet::<T>::config().minimum_backing_votes;
 
 	// Process all backed candidates. `validator_indices` in `BackedCandidates` are indices within
 	// the validator group assigned to the parachain. To obtain this group we need:
 	// 1. Core index assigned to the parachain which has produced the candidate
 	// 2. The relay chain block number of the candidate
-	backed_candidates_with_core.retain_mut(|(bc, core_idx)| {
-		let (validator_indices, maybe_core_index) = bc.validator_indices_and_core_index(core_index_enabled);
+	retain_candidates::<T, _, _>(backed_candidates_with_core, |para_id, (bc, core_idx)| {
		let (validator_indices, maybe_core_index) =
			bc.validator_indices_and_core_index(core_index_enabled);
 		let mut validator_indices = BitVec::<_>::from(validator_indices);
 
-		// Get relay parent block number of the candidate. We need this to get the group index assigned to this core at this block number
+		// Get relay parent block number of the candidate. We need this to get the group index
+		// assigned to this core at this block number
+		let relay_parent_block_number =
+			match allowed_relay_parents.acquire_info(bc.descriptor().relay_parent, None) {
 			Some((_, block_num)) => block_num,
 			None => {
-				log::debug!(target: LOG_TARGET, "Relay parent {:?} for candidate is not in the allowed relay parents. Dropping the candidate.", bc.descriptor().relay_parent);
+				log::debug!(
+					target: LOG_TARGET,
+					"Relay parent {:?} for candidate is not in the allowed relay parents. Dropping the candidate.",
+					bc.descriptor().relay_parent
+				);
 				return false
-			}
+			},
 		};
 
 		// Get the group index for the core
@@ -1208,12 +1195,15 @@ fn filter_backed_statements_from_disabled_validators<
 			None => {
 				log::debug!(target: LOG_TARGET, "Can't get the validators from group {:?}. Dropping the candidate.", group_idx);
 				return false
-			}
+			},
 		};
 
 		// Bitmask with the disabled indices within the validator group
-		let disabled_indices = BitVec::<u8, bitvec::order::Lsb0>::from_iter(validator_group.iter().map(|idx| disabled_validators.contains(idx)));
-		// The indices of statements from disabled validators in `BackedCandidate`. We have to drop these.
+		let disabled_indices = BitVec::<u8, bitvec::order::Lsb0>::from_iter(
+			validator_group.iter().map(|idx| disabled_validators.contains(idx)),
+		);
+		// The indices of statements from disabled validators in `BackedCandidate`. We have to drop
+		// these.
 		let indices_to_drop = disabled_indices.clone() & &validator_indices;
 		// Apply the bitmask to drop the disabled validator from `validator_indices`
 		validator_indices &= !disabled_indices;
@@ -1225,62 +1215,218 @@ fn filter_backed_statements_from_disabled_validators<
-		if indices_to_drop.count_ones() > 0 {
-			filtered = true;
-		}
-
 		// By filtering votes we might render the candidate invalid and cause a failure in
 		// [`process_candidates`]. To avoid this we have to perform a sanity check here. If there
 		// are not enough backing votes after filtering we will remove the whole candidate.
-		if bc.validity_votes().len() < effective_minimum_backing_votes(
-			validator_group.len(),
-			minimum_backing_votes
-		) {
+		if bc.validity_votes().len() <
+			effective_minimum_backing_votes(validator_group.len(), minimum_backing_votes)
+		{
+			log::debug!(
+				target: LOG_TARGET,
+				"Dropping candidate {:?} of paraid {:?} because it was left with too few backing votes after votes from disabled validators were filtered.",
+				bc.candidate().hash(),
+				para_id
+			);
+
 			return false
 		}
 
 		true
 	});
+}
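The vote-filtering arithmetic above reduces to a few bitwise operations; here is a minimal, std-only sketch using `u8` masks (illustrative only: the runtime uses `BitVec` and its own threshold helper, and this assumes the usual rule that the group size caps the configured minimum):

// Validator-group-relative bitmasks, one bit per group member.
// `signed`: which group members signed the candidate.
// `disabled`: which group members are currently disabled.
fn filter_votes(signed: u8, disabled: u8, group_len: u32, min_votes: u32) -> Option<u8> {
	// Drop the votes of disabled validators, mirroring `validator_indices &= !disabled_indices`.
	let kept = signed & !disabled;
	// Threshold rule in the spirit of `effective_minimum_backing_votes`: the group
	// size caps the configured minimum.
	let threshold = min_votes.min(group_len);
	(kept.count_ones() >= threshold).then_some(kept)
}

fn main() {
	// Group of 3; validators 0 and 2 signed (0b101); validator 2 is disabled (0b100).
	assert_eq!(filter_votes(0b101, 0b100, 3, 2), None); // one vote left, below threshold 2
	assert_eq!(filter_votes(0b111, 0b100, 3, 2), Some(0b011));
}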
+
+// Check that candidates pertaining to the same para form a chain. Drop the ones that
+// don't, along with the rest of the candidates which follow them in the input vector.
+// In the process, duplicated candidates will also be dropped (even if they form a valid cycle;
+// cycles are not allowed if they entail backing duplicated candidates).
+fn filter_unchained_candidates<T: inclusion::Config + paras::Config>(
+	candidates: &mut BTreeMap<ParaId, Vec<BackedCandidate<T::Hash>>>,
+	allowed_relay_parents: &AllowedRelayParentsTracker<T::Hash, BlockNumberFor<T>>,
+) {
+	let mut para_latest_head_data: BTreeMap<ParaId, HeadData> = BTreeMap::new();
+	for para_id in candidates.keys() {
+		let latest_head_data = match <inclusion::Pallet<T>>::para_latest_head_data(&para_id) {
+			None => {
+				defensive!("Latest included head data for paraid {:?} is None", para_id);
+				continue
+			},
+			Some(latest_head_data) => latest_head_data,
+		};
+		para_latest_head_data.insert(*para_id, latest_head_data);
+	}
+
+	let mut para_visited_candidates: BTreeMap<ParaId, BTreeSet<CandidateHash>> = BTreeMap::new();
+
+	retain_candidates::<T, _, _>(candidates, |para_id, candidate| {
+		let Some(latest_head_data) = para_latest_head_data.get(&para_id) else { return false };
+		let candidate_hash = candidate.candidate().hash();
+
+		let visited_candidates =
+			para_visited_candidates.entry(para_id).or_insert_with(|| BTreeSet::new());
+		if visited_candidates.contains(&candidate_hash) {
+			log::debug!(
+				target: LOG_TARGET,
+				"Found duplicate candidates for paraid {:?}. Dropping the candidates with hash {:?}",
+				para_id,
+				candidate_hash
+			);
+
+			// If we got a duplicate candidate, stop.
+			return false
+		} else {
+			visited_candidates.insert(candidate_hash);
+		}
+
+		let prev_context = <paras::Pallet<T>>::para_most_recent_context(para_id);
+		let check_ctx = CandidateCheckContext::<T>::new(prev_context);
+
+		let res = match check_ctx.verify_backed_candidate(
+			&allowed_relay_parents,
+			candidate.candidate(),
+			latest_head_data.clone(),
+		) {
+			Ok(_) => true,
+			Err(err) => {
+				log::debug!(
+					target: LOG_TARGET,
+					"Backed candidate verification for candidate {:?} of paraid {:?} failed with {:?}",
+					candidate_hash,
+					para_id,
+					err
+				);
+				false
+			},
+		};
+
+		if res {
+			para_latest_head_data
+				.insert(para_id, candidate.candidate().commitments.head_data.clone());
+		}
 
-	// Also return `true` if a whole candidate was dropped from the set
-	filtered || backed_len_before != backed_candidates_with_core.len()
+		res
+	});
 }
 
 /// Map candidates to scheduled cores.
-/// If the para only has one scheduled core and no `CoreIndex` is injected, map the candidate to the
+/// If the para only has one scheduled core and one candidate supplied, map the candidate to the
 /// single core. If the para has multiple cores scheduled, only map the candidates which have a
 /// proper core injected. Filter out the rest.
 /// Also returns whether or not we dropped any candidates.
+/// When dropping a candidate of a para, we must drop all subsequent candidates from that para
+/// (because they form a chain).
 fn map_candidates_to_cores<T: scheduler::Config + inclusion::Config>(
 	allowed_relay_parents: &AllowedRelayParentsTracker<T::Hash, BlockNumberFor<T>>,
 	mut scheduled: BTreeMap<ParaId, BTreeSet<CoreIndex>>,
 	core_index_enabled: bool,
-	candidates: Vec<BackedCandidate<T::Hash>>,
-) -> Vec<(BackedCandidate<T::Hash>, CoreIndex)> {
-	let mut backed_candidates_with_core = Vec::with_capacity(candidates.len());
-
-	// We keep a candidate if the parachain has only one core assigned or if
-	// a core index is provided by block author and it's indeed scheduled.
-	for backed_candidate in candidates {
-		let maybe_injected_core_index = get_injected_core_index::<T>(
-			allowed_relay_parents,
-			&backed_candidate,
-			core_index_enabled,
-		);
+	candidates: BTreeMap<ParaId, Vec<BackedCandidate<T::Hash>>>,
+) -> BTreeMap<ParaId, Vec<(BackedCandidate<T::Hash>, CoreIndex)>> {
+	let mut backed_candidates_with_core = BTreeMap::new();
+
+	for (para_id, backed_candidates) in candidates.into_iter() {
+		if backed_candidates.len() == 0 {
+			defensive!("Backed candidates for paraid {} is empty.", para_id);
+			continue
+		}
 
-		let scheduled_cores = scheduled.get_mut(&backed_candidate.descriptor().para_id);
-		// Candidates without scheduled cores are silently filtered out.
+		let scheduled_cores = scheduled.get_mut(&para_id);
+
+		// ParaIds without scheduled cores are silently filtered out.
 		if let Some(scheduled_cores) = scheduled_cores {
-			if let Some(core_idx) = maybe_injected_core_index {
-				if scheduled_cores.contains(&core_idx) {
-					scheduled_cores.remove(&core_idx);
-					backed_candidates_with_core.push((backed_candidate, core_idx));
+			if scheduled_cores.len() == 0 {
+				log::debug!(
+					target: LOG_TARGET,
+					"Paraid: {:?} has no scheduled cores but {} candidates were supplied.",
+					para_id,
+					backed_candidates.len()
+				);
+
+				// Non-elastic scaling case. One core per para.
+			} else if scheduled_cores.len() == 1 && !core_index_enabled {
+				backed_candidates_with_core.insert(
+					para_id,
+					vec![(
+						// We need the first one here, as we assume candidates of a para are in
+						// dependency order.
+						backed_candidates.into_iter().next().expect("Length is at least 1"),
+						scheduled_cores.pop_first().expect("Length is 1"),
+					)],
+				);
+				continue;
+
+				// Elastic scaling case. We only allow candidates which have the right core
+				// indices injected.
+			} else if scheduled_cores.len() >= 1 && core_index_enabled {
+				// We must preserve the dependency order given in the input.
+				let mut temp_backed_candidates = Vec::with_capacity(scheduled_cores.len());
+
+				for candidate in backed_candidates {
+					if scheduled_cores.len() == 0 {
+						// We've got candidates for all of this para's assigned cores. Move on to
+						// the next para.
+						log::debug!(
+							target: LOG_TARGET,
+							"Found enough candidates for paraid: {:?}.",
+							candidate.descriptor().para_id
+						);
+						break;
+					}
+					let maybe_injected_core_index: Option<CoreIndex> =
+						get_injected_core_index::<T>(allowed_relay_parents, &candidate);
+
+					if let Some(core_index) = maybe_injected_core_index {
+						if scheduled_cores.remove(&core_index) {
+							temp_backed_candidates.push((candidate, core_index));
+						} else {
+							// If we got a candidate for a core index which is not scheduled, stop
+							// the work for this para. The already processed candidate chain in
+							// temp_backed_candidates is still fine though.
+							log::debug!(
+								target: LOG_TARGET,
+								"Found a backed candidate {:?} with injected core index {}, which is not scheduled for paraid {:?}.",
+								candidate.candidate().hash(),
+								core_index.0,
+								candidate.descriptor().para_id
+							);
+
+							break;
+						}
+					} else {
+						// If we got a candidate which does not contain its core index, stop the
+						// work for this para. The already processed candidate chain in
+						// temp_backed_candidates is still fine though.
+ + log::debug!( + target: LOG_TARGET, + "Found a backed candidate {:?} with no injected core index, for paraid {:?} which has multiple scheduled cores.", + candidate.candidate().hash(), + candidate.descriptor().para_id + ); + + break; + } } - } else if scheduled_cores.len() == 1 { - backed_candidates_with_core - .push((backed_candidate, scheduled_cores.pop_first().expect("Length is 1"))); + + if !temp_backed_candidates.is_empty() { + backed_candidates_with_core + .entry(para_id) + .or_insert_with(|| vec![]) + .extend(temp_backed_candidates); + } + } else { + log::warn!( + target: LOG_TARGET, + "Found a paraid {:?} which has multiple scheduled cores but ElasticScalingMVP feature is not enabled: {:?}", + para_id, + scheduled_cores + ); } + } else { + log::debug!( + target: LOG_TARGET, + "Paraid: {:?} has no scheduled cores but {} candidates were supplied.", + para_id, + backed_candidates.len() + ); } } @@ -1290,13 +1436,11 @@ fn map_candidates_to_cores( allowed_relay_parents: &AllowedRelayParentsTracker>, candidate: &BackedCandidate, - core_index_enabled: bool, ) -> Option { // After stripping the 8 bit extensions, the `validator_indices` field length is expected // to be equal to backing group size. If these don't match, the `CoreIndex` is badly encoded, // or not supported. - let (validator_indices, maybe_core_idx) = - candidate.validator_indices_and_core_index(core_index_enabled); + let (validator_indices, maybe_core_idx) = candidate.validator_indices_and_core_index(true); let Some(core_idx) = maybe_core_idx else { return None }; @@ -1306,7 +1450,7 @@ fn get_injected_core_index { log::debug!( target: LOG_TARGET, - "Relay parent {:?} for candidate {:?} is not in the allowed relay parents. Dropping the candidate.", + "Relay parent {:?} for candidate {:?} is not in the allowed relay parents.", candidate.descriptor().relay_parent, candidate.candidate().hash(), ); @@ -1323,9 +1467,8 @@ fn get_injected_core_index { log::debug!( target: LOG_TARGET, - "Can't get the group index for core idx {:?}. Dropping the candidate {:?}.", + "Can't get the group index for core idx {:?}.", core_idx, - candidate.candidate().hash(), ); return None }, @@ -1339,6 +1482,14 @@ fn get_injected_core_index MockGenesisConfig { + MockGenesisConfig { + configuration: configuration::GenesisConfig { + config: HostConfiguration { + max_head_data_size: 0b100000, + scheduler_params: SchedulerParams { + group_rotation_frequency: u32::MAX, + ..Default::default() + }, + ..Default::default() + }, + }, + ..Default::default() + } +} + // In order to facilitate benchmarks as tests we have a benchmark feature gated `WeightInfo` impl // that uses 0 for all the weights. Because all the weights are 0, the tests that rely on // weights for limiting data will fail, so we don't run them when using the benchmark feature. 
#[cfg(not(feature = "runtime-benchmarks"))] mod enter { - use super::{inclusion::tests::TestCandidateBuilder, *}; use crate::{ builder::{Bench, BenchBuilder}, - mock::{mock_assigner, new_test_ext, BlockLength, BlockWeights, MockGenesisConfig, Test}, + mock::{mock_assigner, new_test_ext, BlockLength, BlockWeights, RuntimeOrigin, Test}, scheduler::{ common::{Assignment, AssignmentProvider}, ParasEntry, }, + session_info, }; use assert_matches::assert_matches; + use core::panic; use frame_support::assert_ok; use frame_system::limits; - use primitives::vstaging::SchedulerParams; + use primitives::{vstaging::SchedulerParams, AvailabilityBitfield, UncheckedSigned}; use sp_runtime::Perbill; use sp_std::collections::btree_map::BTreeMap; @@ -45,6 +68,8 @@ mod enter { num_validators_per_core: u32, code_upgrade: Option, fill_claimqueue: bool, + elastic_paras: BTreeMap, + unavailable_cores: Vec, } fn make_inherent_data( @@ -55,25 +80,39 @@ mod enter { num_validators_per_core, code_upgrade, fill_claimqueue, + elastic_paras, + unavailable_cores, }: TestConfig, ) -> Bench { + let extra_cores = elastic_paras + .values() + .map(|count| *count as usize) + .sum::() + .saturating_sub(elastic_paras.len() as usize); + let total_cores = dispute_sessions.len() + backed_and_concluding.len() + extra_cores; + let builder = BenchBuilder::::new() - .set_max_validators( - (dispute_sessions.len() + backed_and_concluding.len()) as u32 * - num_validators_per_core, - ) + .set_max_validators((total_cores) as u32 * num_validators_per_core) + .set_elastic_paras(elastic_paras.clone()) .set_max_validators_per_core(num_validators_per_core) .set_dispute_statements(dispute_statements) - .set_backed_and_concluding_paras(backed_and_concluding) + .set_backed_and_concluding_paras(backed_and_concluding.clone()) .set_dispute_sessions(&dispute_sessions[..]) - .set_fill_claimqueue(fill_claimqueue); + .set_fill_claimqueue(fill_claimqueue) + .set_unavailable_cores(unavailable_cores); // Setup some assignments as needed: mock_assigner::Pallet::::set_core_count(builder.max_cores()); - for core_index in 0..builder.max_cores() { - // Core index == para_id in this case - mock_assigner::Pallet::::add_test_assignment(Assignment::Bulk(core_index.into())); - } + + (0..(builder.max_cores() as usize - extra_cores)).for_each(|para_id| { + (0..elastic_paras.get(&(para_id as u32)).cloned().unwrap_or(1)).for_each( + |_para_local_core_idx| { + mock_assigner::Pallet::::add_test_assignment(Assignment::Bulk( + para_id.into(), + )); + }, + ); + }); if let Some(code_size) = code_upgrade { builder.set_code_upgrade(code_size).build() @@ -104,6 +143,8 @@ mod enter { num_validators_per_core: 1, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); // We expect the scenario to have cores 0 & 1 with pending availability. The backed @@ -145,6 +186,305 @@ mod enter { Pallet::::on_chain_votes().unwrap().session, 2 ); + + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(0)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![CoreIndex(0)] + ); + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(1)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![CoreIndex(1)] + ); + }); + } + + #[test] + fn include_backed_candidates_elastic_scaling() { + // ParaId 0 has one pending candidate on core 0. + // ParaId 1 has one pending candidate on core 1. + // ParaId 2 has three pending candidates on cores 2, 3 and 4. 
+ // All of them are being made available in this block. Propose 5 more candidates (one for + // each core) and check that they're successfully backed and the old ones enacted. + let config = default_config(); + assert!(config.configuration.config.scheduler_params.lookahead > 0); + new_test_ext(config).execute_with(|| { + // Set the elastic scaling MVP feature. + >::set_node_feature( + RuntimeOrigin::root(), + FeatureIndex::ElasticScalingMVP as u8, + true, + ) + .unwrap(); + + let dispute_statements = BTreeMap::new(); + + let mut backed_and_concluding = BTreeMap::new(); + backed_and_concluding.insert(0, 1); + backed_and_concluding.insert(1, 1); + backed_and_concluding.insert(2, 1); + + let scenario = make_inherent_data(TestConfig { + dispute_statements, + dispute_sessions: vec![], // No disputes + backed_and_concluding, + num_validators_per_core: 1, + code_upgrade: None, + fill_claimqueue: false, + elastic_paras: [(2, 3)].into_iter().collect(), + unavailable_cores: vec![], + }); + + let expected_para_inherent_data = scenario.data.clone(); + + // Check the para inherent data is as expected: + // * 1 bitfield per validator (5 validators) + assert_eq!(expected_para_inherent_data.bitfields.len(), 5); + // * 1 backed candidate per core (5 cores) + assert_eq!(expected_para_inherent_data.backed_candidates.len(), 5); + // * 0 disputes. + assert_eq!(expected_para_inherent_data.disputes.len(), 0); + let mut inherent_data = InherentData::new(); + inherent_data + .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data) + .unwrap(); + + // The current schedule is empty prior to calling `create_inherent_enter`. + assert!(>::claimqueue_is_empty()); + + assert!(Pallet::::on_chain_votes().is_none()); + + // Nothing is filtered out (including the backed candidates.) + assert_eq!( + Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(), + expected_para_inherent_data + ); + + assert_eq!( + // The length of this vec is equal to the number of candidates, so we know our 5 + // backed candidates did not get filtered out + Pallet::::on_chain_votes().unwrap().backing_validators_per_candidate.len(), + 5 + ); + + assert_eq!( + // The session of the on chain votes should equal the current session, which is 2 + Pallet::::on_chain_votes().unwrap().session, + 2 + ); + + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(0)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![CoreIndex(0)] + ); + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(1)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![CoreIndex(1)] + ); + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(2)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![CoreIndex(2), CoreIndex(3), CoreIndex(4)] + ); + }); + + // ParaId 0 has one pending candidate on core 0. + // ParaId 1 has one pending candidate on core 1. + // ParaId 2 has 4 pending candidates on cores 2, 3, 4 and 5. + // Cores 1, 2 and 3 are being made available in this block. Propose 6 more candidates (one + // for each core) and check that the right ones are successfully backed and the old ones + // enacted. + let config = default_config(); + assert!(config.configuration.config.scheduler_params.lookahead > 0); + new_test_ext(config).execute_with(|| { + // Set the elastic scaling MVP feature. 
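+ // (Note: without the ElasticScalingMVP node feature, sanitization drops all
+ // candidates of a para that has more than one scheduled core, so the multi-core
+ // para in this test could never be backed.)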
+ >::set_node_feature( + RuntimeOrigin::root(), + FeatureIndex::ElasticScalingMVP as u8, + true, + ) + .unwrap(); + + let mut backed_and_concluding = BTreeMap::new(); + backed_and_concluding.insert(0, 1); + backed_and_concluding.insert(1, 1); + backed_and_concluding.insert(2, 1); + + // Modify the availability bitfields so that cores 0, 4 and 5 are not being made + // available. + let unavailable_cores = vec![0, 4, 5]; + + let scenario = make_inherent_data(TestConfig { + dispute_statements: BTreeMap::new(), + dispute_sessions: vec![], // No disputes + backed_and_concluding, + num_validators_per_core: 1, + code_upgrade: None, + fill_claimqueue: true, + elastic_paras: [(2, 4)].into_iter().collect(), + unavailable_cores: unavailable_cores.clone(), + }); + + let mut expected_para_inherent_data = scenario.data.clone(); + + // Check the para inherent data is as expected: + // * 1 bitfield per validator (6 validators) + assert_eq!(expected_para_inherent_data.bitfields.len(), 6); + // * 1 backed candidate per core (6 cores) + assert_eq!(expected_para_inherent_data.backed_candidates.len(), 6); + // * 0 disputes. + assert_eq!(expected_para_inherent_data.disputes.len(), 0); + assert!(Pallet::::on_chain_votes().is_none()); + + expected_para_inherent_data.backed_candidates = expected_para_inherent_data + .backed_candidates + .into_iter() + .filter(|candidate| { + let (_, Some(core_index)) = candidate.validator_indices_and_core_index(true) + else { + panic!("Core index must have been injected"); + }; + !unavailable_cores.contains(&core_index.0) + }) + .collect(); + + let mut inherent_data = InherentData::new(); + inherent_data.put_data(PARACHAINS_INHERENT_IDENTIFIER, &scenario.data).unwrap(); + + assert!(!>::claimqueue_is_empty()); + + // The right candidates have been filtered out (the ones for cores 0,4,5) + assert_eq!( + Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(), + expected_para_inherent_data + ); + + // 3 candidates have been backed (for cores 1,2 and 3) + assert_eq!( + Pallet::::on_chain_votes().unwrap().backing_validators_per_candidate.len(), + 3 + ); + + assert_eq!( + // The session of the on chain votes should equal the current session, which is 2 + Pallet::::on_chain_votes().unwrap().session, + 2 + ); + + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(1)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![CoreIndex(1)] + ); + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(2)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![CoreIndex(4), CoreIndex(5), CoreIndex(2), CoreIndex(3)] + ); + + let expected_heads = (0..=2) + .map(|id| { + inclusion::PendingAvailability::::get(ParaId::from(id)) + .unwrap() + .back() + .unwrap() + .candidate_commitments() + .head_data + .clone() + }) + .collect::>(); + + // Now just make all candidates available. 
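+ // (This is done by re-signing every validator's bitfield with all bits set, so
+ // that each pending candidate is deemed available and gets enacted by the next
+ // inherent.)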
+ let mut data = scenario.data.clone(); + let validators = session_info::Pallet::::session_info(2).unwrap().validators; + let signing_context = SigningContext { + parent_hash: BenchBuilder::::header(4).hash(), + session_index: 2, + }; + + data.backed_candidates.clear(); + + data.bitfields.iter_mut().enumerate().for_each(|(i, bitfield)| { + let unchecked_signed = UncheckedSigned::::benchmark_sign( + validators.get(ValidatorIndex(i as u32)).unwrap(), + bitvec::bitvec![u8, bitvec::order::Lsb0; 1; 6].into(), + &signing_context, + ValidatorIndex(i as u32), + ); + *bitfield = unchecked_signed; + }); + let mut inherent_data = InherentData::new(); + inherent_data.put_data(PARACHAINS_INHERENT_IDENTIFIER, &data).unwrap(); + + // Nothing has been filtered out. + assert_eq!( + Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(), + data + ); + + // No more candidates have been backed + assert!(Pallet::::on_chain_votes() + .unwrap() + .backing_validators_per_candidate + .is_empty()); + + // No more pending availability candidates + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(0)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![] + ); + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(1)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![] + ); + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(2)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![] + ); + + // Paras have the right on-chain heads now + expected_heads.into_iter().enumerate().for_each(|(id, head)| { + assert_eq!( + paras::Pallet::::para_head(ParaId::from(id as u32)).unwrap(), + head + ); + }); }); } @@ -255,6 +595,8 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -326,6 +668,8 @@ mod enter { num_validators_per_core: 6, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -395,6 +739,8 @@ mod enter { num_validators_per_core: 4, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -480,6 +826,8 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -565,6 +913,8 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -649,6 +999,8 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -754,6 +1106,8 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -820,6 +1174,8 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = 
scenario.data.clone(); @@ -884,6 +1240,8 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -985,6 +1343,8 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let mut para_inherent_data = scenario.data.clone(); @@ -1072,6 +1432,8 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -1119,7 +1481,7 @@ mod sanitizers { inclusion::tests::{ back_candidate, collator_sign_candidate, BackingKind, TestCandidateBuilder, }, - mock::{new_test_ext, MockGenesisConfig}, + mock::new_test_ext, }; use bitvec::order::Lsb0; use primitives::{ @@ -1375,9 +1737,11 @@ mod sanitizers { mod candidates { use crate::{ - mock::set_disabled_validators, + mock::{set_disabled_validators, RuntimeOrigin}, scheduler::{common::Assignment, ParasEntry}, + util::{make_persisted_validation_data, make_persisted_validation_data_with_parent}, }; + use primitives::ValidationCode; use sp_std::collections::vec_deque::VecDeque; use super::*; @@ -1385,13 +1749,14 @@ mod sanitizers { // Backed candidates and scheduled parachains used for `sanitize_backed_candidates` testing struct TestData { backed_candidates: Vec, - all_backed_candidates_with_core: Vec<(BackedCandidate, CoreIndex)>, + expected_backed_candidates_with_core: + BTreeMap>, scheduled_paras: BTreeMap>, } - // Generate test data for the candidates and assert that the evnironment is set as expected + // Generate test data for the candidates and assert that the environment is set as expected // (check the comments for details) - fn get_test_data(core_index_enabled: bool) -> TestData { + fn get_test_data_one_core_per_para(core_index_enabled: bool) -> TestData { const RELAY_PARENT_NUM: u32 = 3; // Add the relay parent to `shared` pallet. Otherwise some code (e.g. filtering backing @@ -1467,6 +1832,24 @@ mod sanitizers { ), ])); + // Set the on-chain included head data for paras. + paras::Pallet::::set_current_head(ParaId::from(1), HeadData(vec![1])); + paras::Pallet::::set_current_head(ParaId::from(2), HeadData(vec![2])); + + // Set the current_code_hash + paras::Pallet::::force_set_current_code( + RuntimeOrigin::root(), + ParaId::from(1), + ValidationCode(vec![1]), + ) + .unwrap(); + paras::Pallet::::force_set_current_code( + RuntimeOrigin::root(), + ParaId::from(2), + ValidationCode(vec![2]), + ) + .unwrap(); + // Callback used for backing candidates let group_validators = |group_index: GroupIndex| { match group_index { @@ -1486,8 +1869,15 @@ mod sanitizers { para_id: ParaId::from(idx1), relay_parent, pov_hash: Hash::repeat_byte(idx1 as u8), - persisted_validation_data_hash: [42u8; 32].into(), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(idx1), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![idx1 as u8]), ..Default::default() } .build(); @@ -1523,36 +1913,34 @@ mod sanitizers { ] ); - let all_backed_candidates_with_core = backed_candidates - .iter() - .map(|candidate| { - // Only one entry for this test data. 
- ( - candidate.clone(), - scheduled - .get(&candidate.descriptor().para_id) - .unwrap() - .first() - .copied() - .unwrap(), - ) - }) - .collect(); + let mut expected_backed_candidates_with_core = BTreeMap::new(); + + for candidate in backed_candidates.iter() { + let para_id = candidate.descriptor().para_id; + + expected_backed_candidates_with_core.entry(para_id).or_insert(vec![]).push(( + candidate.clone(), + scheduled.get(¶_id).unwrap().first().copied().unwrap(), + )); + } TestData { backed_candidates, scheduled_paras: scheduled, - all_backed_candidates_with_core, + expected_backed_candidates_with_core, } } - // Generate test data for the candidates and assert that the evnironment is set as expected + // Generate test data for the candidates and assert that the environment is set as expected // (check the comments for details) // Para 1 scheduled on core 0 and core 1. Two candidates are supplied. // Para 2 scheduled on cores 2 and 3. One candidate supplied. // Para 3 scheduled on core 4. One candidate supplied. // Para 4 scheduled on core 5. Two candidates supplied. // Para 5 scheduled on core 6. No candidates supplied. + // Para 6 is not scheduled. One candidate supplied. + // Para 7 is scheduled on core 7 and 8, but the candidate contains the wrong core index. + // Para 8 is scheduled on core 9, but the candidate contains the wrong core index. fn get_test_data_multiple_cores_per_para(core_index_enabled: bool) -> TestData { const RELAY_PARENT_NUM: u32 = 3; @@ -1581,6 +1969,7 @@ mod sanitizers { keyring::Sr25519Keyring::Eve, keyring::Sr25519Keyring::Ferdie, keyring::Sr25519Keyring::One, + keyring::Sr25519Keyring::Two, ]; for validator in validators.iter() { Keystore::sr25519_generate_new( @@ -1605,6 +1994,7 @@ mod sanitizers { vec![ValidatorIndex(4)], vec![ValidatorIndex(5)], vec![ValidatorIndex(6)], + vec![ValidatorIndex(7)], ]); // Update scheduler's claimqueue with the parachains @@ -1658,8 +2048,40 @@ mod sanitizers { RELAY_PARENT_NUM, )]), ), + ( + CoreIndex::from(7), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 7.into(), core_index: CoreIndex(7) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(8), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 7.into(), core_index: CoreIndex(8) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(9), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 8.into(), core_index: CoreIndex(9) }, + RELAY_PARENT_NUM, + )]), + ), ])); + // Set the on-chain included head data and current code hash. 
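+ // (These need to line up with the candidates built below: sanitization validates
+ // each candidate's persisted validation data hash against the stored head data and
+ // its descriptor's validation code hash against the current code.)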
+ for id in 1..=8u32 { + paras::Pallet::::set_current_head(ParaId::from(id), HeadData(vec![id as u8])); + paras::Pallet::::force_set_current_code( + RuntimeOrigin::root(), + ParaId::from(id), + ValidationCode(vec![id as u8]), + ) + .unwrap(); + } + // Callback used for backing candidates let group_validators = |group_index: GroupIndex| { match group_index { @@ -1670,6 +2092,7 @@ mod sanitizers { group_index if group_index == GroupIndex::from(4) => Some(vec![4]), group_index if group_index == GroupIndex::from(5) => Some(vec![5]), group_index if group_index == GroupIndex::from(6) => Some(vec![6]), + group_index if group_index == GroupIndex::from(7) => Some(vec![7]), _ => panic!("Group index out of bounds"), } @@ -1677,7 +2100,7 @@ mod sanitizers { }; let mut backed_candidates = vec![]; - let mut all_backed_candidates_with_core = vec![]; + let mut expected_backed_candidates_with_core = BTreeMap::new(); // Para 1 { @@ -1685,14 +2108,23 @@ mod sanitizers { para_id: ParaId::from(1), relay_parent, pov_hash: Hash::repeat_byte(1 as u8), - persisted_validation_data_hash: [42u8; 32].into(), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(1), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), hrmp_watermark: RELAY_PARENT_NUM, + head_data: HeadData(vec![1, 1]), + validation_code: ValidationCode(vec![1]), ..Default::default() } .build(); collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + let prev_candidate = candidate.clone(); let backed: BackedCandidate = back_candidate( candidate, &validators, @@ -1704,15 +2136,26 @@ mod sanitizers { ); backed_candidates.push(backed.clone()); if core_index_enabled { - all_backed_candidates_with_core.push((backed, CoreIndex(0))); + expected_backed_candidates_with_core + .entry(ParaId::from(1)) + .or_insert(vec![]) + .push((backed, CoreIndex(0))); } let mut candidate = TestCandidateBuilder { para_id: ParaId::from(1), relay_parent, pov_hash: Hash::repeat_byte(2 as u8), - persisted_validation_data_hash: [42u8; 32].into(), + persisted_validation_data_hash: make_persisted_validation_data_with_parent::< + Test, + >( + RELAY_PARENT_NUM, + Default::default(), + prev_candidate.commitments.head_data, + ) + .hash(), hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![1]), ..Default::default() } .build(); @@ -1730,7 +2173,10 @@ mod sanitizers { ); backed_candidates.push(backed.clone()); if core_index_enabled { - all_backed_candidates_with_core.push((backed, CoreIndex(1))); + expected_backed_candidates_with_core + .entry(ParaId::from(1)) + .or_insert(vec![]) + .push((backed, CoreIndex(1))); } } @@ -1740,8 +2186,15 @@ mod sanitizers { para_id: ParaId::from(2), relay_parent, pov_hash: Hash::repeat_byte(3 as u8), - persisted_validation_data_hash: [42u8; 32].into(), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(2), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![2]), ..Default::default() } .build(); @@ -1759,7 +2212,10 @@ mod sanitizers { ); backed_candidates.push(backed.clone()); if core_index_enabled { - all_backed_candidates_with_core.push((backed, CoreIndex(2))); + expected_backed_candidates_with_core + .entry(ParaId::from(2)) + .or_insert(vec![]) + .push((backed, CoreIndex(2))); } } @@ -1769,8 +2225,15 @@ mod sanitizers { para_id: ParaId::from(3), relay_parent, pov_hash: Hash::repeat_byte(4 as u8), - persisted_validation_data_hash: [42u8; 32].into(), + 
persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(3), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![3]), ..Default::default() } .build(); @@ -1787,7 +2250,10 @@ mod sanitizers { core_index_enabled.then_some(CoreIndex(4 as u32)), ); backed_candidates.push(backed.clone()); - all_backed_candidates_with_core.push((backed, CoreIndex(4))); + expected_backed_candidates_with_core + .entry(ParaId::from(3)) + .or_insert(vec![]) + .push((backed, CoreIndex(4))); } // Para 4 @@ -1796,14 +2262,22 @@ mod sanitizers { para_id: ParaId::from(4), relay_parent, pov_hash: Hash::repeat_byte(5 as u8), - persisted_validation_data_hash: [42u8; 32].into(), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(4), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![4]), ..Default::default() } .build(); collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + let prev_candidate = candidate.clone(); let backed = back_candidate( candidate, &validators, @@ -1811,17 +2285,28 @@ mod sanitizers { &keystore, &signing_context, BackingKind::Threshold, - None, + core_index_enabled.then_some(CoreIndex(5 as u32)), ); backed_candidates.push(backed.clone()); - all_backed_candidates_with_core.push((backed, CoreIndex(5))); + expected_backed_candidates_with_core + .entry(ParaId::from(4)) + .or_insert(vec![]) + .push((backed, CoreIndex(5))); let mut candidate = TestCandidateBuilder { para_id: ParaId::from(4), relay_parent, pov_hash: Hash::repeat_byte(6 as u8), - persisted_validation_data_hash: [42u8; 32].into(), + persisted_validation_data_hash: make_persisted_validation_data_with_parent::< + Test, + >( + RELAY_PARENT_NUM, + Default::default(), + prev_candidate.commitments.head_data, + ) + .hash(), hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![4]), ..Default::default() } .build(); @@ -1842,25 +2327,635 @@ mod sanitizers { // No candidate for para 5. - // State sanity checks - assert_eq!( - >::scheduled_paras().collect::>(), - vec![ - (CoreIndex(0), ParaId::from(1)), - (CoreIndex(1), ParaId::from(1)), - (CoreIndex(2), ParaId::from(2)), - (CoreIndex(3), ParaId::from(2)), - (CoreIndex(4), ParaId::from(3)), - (CoreIndex(5), ParaId::from(4)), - (CoreIndex(6), ParaId::from(5)), - ] - ); - let mut scheduled: BTreeMap> = BTreeMap::new(); - for (core_idx, para_id) in >::scheduled_paras() { - scheduled.entry(para_id).or_default().insert(core_idx); + // Para 6. + { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(6), + relay_parent, + pov_hash: Hash::repeat_byte(3 as u8), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(6), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![6]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(6 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(6 as u32)), + ); + backed_candidates.push(backed.clone()); } - assert_eq!( + // Para 7. 
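+ // (This candidate gets core index 6 injected, which doesn't match its assigned
+ // cores 7 and 8. With the core index check disabled it should also be dropped,
+ // since a para with multiple scheduled cores needs an injected core index.)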
+ { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(7), + relay_parent, + pov_hash: Hash::repeat_byte(3 as u8), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(7), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![7]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(6 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(6 as u32)), + ); + backed_candidates.push(backed.clone()); + } + + // Para 8. + { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(8), + relay_parent, + pov_hash: Hash::repeat_byte(3 as u8), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(8), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![8]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(6 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(7 as u32)), + ); + backed_candidates.push(backed.clone()); + if !core_index_enabled { + expected_backed_candidates_with_core + .entry(ParaId::from(8)) + .or_insert(vec![]) + .push((backed, CoreIndex(9))); + } + } + + // State sanity checks + assert_eq!( + >::scheduled_paras().collect::>(), + vec![ + (CoreIndex(0), ParaId::from(1)), + (CoreIndex(1), ParaId::from(1)), + (CoreIndex(2), ParaId::from(2)), + (CoreIndex(3), ParaId::from(2)), + (CoreIndex(4), ParaId::from(3)), + (CoreIndex(5), ParaId::from(4)), + (CoreIndex(6), ParaId::from(5)), + (CoreIndex(7), ParaId::from(7)), + (CoreIndex(8), ParaId::from(7)), + (CoreIndex(9), ParaId::from(8)), + ] + ); + let mut scheduled: BTreeMap> = BTreeMap::new(); + for (core_idx, para_id) in >::scheduled_paras() { + scheduled.entry(para_id).or_default().insert(core_idx); + } + + assert_eq!( + shared::Pallet::::active_validator_indices(), + vec![ + ValidatorIndex(0), + ValidatorIndex(1), + ValidatorIndex(2), + ValidatorIndex(3), + ValidatorIndex(4), + ValidatorIndex(5), + ValidatorIndex(6), + ValidatorIndex(7), + ] + ); + + TestData { + backed_candidates, + scheduled_paras: scheduled, + expected_backed_candidates_with_core, + } + } + + // Para 1 scheduled on core 0 and core 1. Two candidates are supplied. They form a chain but + // in the wrong order. + // Para 2 scheduled on core 2, core 3 and core 4. Three candidates are supplied. The second + // one is not part of the chain. + // Para 3 scheduled on core 5 and 6. Two candidates are supplied and they all form a chain. + // Para 4 scheduled on core 7 and 8. Duplicated candidates. + fn get_test_data_for_order_checks(core_index_enabled: bool) -> TestData { + const RELAY_PARENT_NUM: u32 = 3; + + // Add the relay parent to `shared` pallet. Otherwise some code (e.g. 
filtering backing + // votes) won't behave correctly + shared::Pallet::::add_allowed_relay_parent( + default_header().hash(), + Default::default(), + RELAY_PARENT_NUM, + 1, + ); + + let header = default_header(); + let relay_parent = header.hash(); + let session_index = SessionIndex::from(0_u32); + + let keystore = LocalKeystore::in_memory(); + let keystore = Arc::new(keystore) as KeystorePtr; + let signing_context = SigningContext { parent_hash: relay_parent, session_index }; + + let validators = vec![ + keyring::Sr25519Keyring::Alice, + keyring::Sr25519Keyring::Bob, + keyring::Sr25519Keyring::Charlie, + keyring::Sr25519Keyring::Dave, + keyring::Sr25519Keyring::Eve, + keyring::Sr25519Keyring::Ferdie, + keyring::Sr25519Keyring::One, + keyring::Sr25519Keyring::Two, + keyring::Sr25519Keyring::AliceStash, + ]; + for validator in validators.iter() { + Keystore::sr25519_generate_new( + &*keystore, + PARACHAIN_KEY_TYPE_ID, + Some(&validator.to_seed()), + ) + .unwrap(); + } + + // Set active validators in `shared` pallet + let validator_ids = + validators.iter().map(|v| v.public().into()).collect::>(); + shared::Pallet::::set_active_validators_ascending(validator_ids); + + // Set the validator groups in `scheduler` + scheduler::Pallet::::set_validator_groups(vec![ + vec![ValidatorIndex(0)], + vec![ValidatorIndex(1)], + vec![ValidatorIndex(2)], + vec![ValidatorIndex(3)], + vec![ValidatorIndex(4)], + vec![ValidatorIndex(5)], + vec![ValidatorIndex(6)], + vec![ValidatorIndex(7)], + vec![ValidatorIndex(8)], + ]); + + // Update scheduler's claimqueue with the parachains + scheduler::Pallet::::set_claimqueue(BTreeMap::from([ + ( + CoreIndex::from(0), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(0) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(1), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(1) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(2), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(2) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(3), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(3) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(4), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(4) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(5), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 3.into(), core_index: CoreIndex(5) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(6), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 3.into(), core_index: CoreIndex(6) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(7), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 4.into(), core_index: CoreIndex(7) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(8), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 4.into(), core_index: CoreIndex(8) }, + RELAY_PARENT_NUM, + )]), + ), + ])); + + // Set the on-chain included head data and current code hash. 
+ for id in 1..=4u32 { + paras::Pallet::::set_current_head(ParaId::from(id), HeadData(vec![id as u8])); + paras::Pallet::::force_set_current_code( + RuntimeOrigin::root(), + ParaId::from(id), + ValidationCode(vec![id as u8]), + ) + .unwrap(); + } + + // Callback used for backing candidates + let group_validators = |group_index: GroupIndex| { + match group_index { + group_index if group_index == GroupIndex::from(0) => Some(vec![0]), + group_index if group_index == GroupIndex::from(1) => Some(vec![1]), + group_index if group_index == GroupIndex::from(2) => Some(vec![2]), + group_index if group_index == GroupIndex::from(3) => Some(vec![3]), + group_index if group_index == GroupIndex::from(4) => Some(vec![4]), + group_index if group_index == GroupIndex::from(5) => Some(vec![5]), + group_index if group_index == GroupIndex::from(6) => Some(vec![6]), + group_index if group_index == GroupIndex::from(7) => Some(vec![7]), + group_index if group_index == GroupIndex::from(8) => Some(vec![8]), + + _ => panic!("Group index out of bounds"), + } + .map(|m| m.into_iter().map(ValidatorIndex).collect::>()) + }; + + let mut backed_candidates = vec![]; + let mut expected_backed_candidates_with_core = BTreeMap::new(); + + // Para 1 + { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(1), + relay_parent, + pov_hash: Hash::repeat_byte(1 as u8), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(1), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), + head_data: HeadData(vec![1, 1]), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![1]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let prev_candidate = candidate.clone(); + let prev_backed: BackedCandidate = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(0 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(0 as u32)), + ); + + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(1), + relay_parent, + pov_hash: Hash::repeat_byte(2 as u8), + persisted_validation_data_hash: make_persisted_validation_data_with_parent::< + Test, + >( + RELAY_PARENT_NUM, + Default::default(), + prev_candidate.commitments.head_data, + ) + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![1]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(1 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(1 as u32)), + ); + backed_candidates.push(backed.clone()); + backed_candidates.push(prev_backed.clone()); + } + + // Para 2. 
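+ // (Three candidates: the second doesn't build on the first but forks from the
+ // same on-chain head, so it and its descendant, the third, should both be
+ // filtered out.)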
+ { + let mut candidate_1 = TestCandidateBuilder { + para_id: ParaId::from(2), + relay_parent, + pov_hash: Hash::repeat_byte(3 as u8), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(2), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), + head_data: HeadData(vec![2, 2]), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![2]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate_1); + + let backed_1: BackedCandidate = back_candidate( + candidate_1, + &validators, + group_validators(GroupIndex::from(2 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(2 as u32)), + ); + + backed_candidates.push(backed_1.clone()); + if core_index_enabled { + expected_backed_candidates_with_core + .entry(ParaId::from(2)) + .or_insert(vec![]) + .push((backed_1, CoreIndex(2))); + } + + let mut candidate_2 = TestCandidateBuilder { + para_id: ParaId::from(2), + relay_parent, + pov_hash: Hash::repeat_byte(4 as u8), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(2), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![2]), + head_data: HeadData(vec![3, 3]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate_2); + + let backed_2 = back_candidate( + candidate_2.clone(), + &validators, + group_validators(GroupIndex::from(3 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(3 as u32)), + ); + backed_candidates.push(backed_2.clone()); + + let mut candidate_3 = TestCandidateBuilder { + para_id: ParaId::from(2), + relay_parent, + pov_hash: Hash::repeat_byte(5 as u8), + persisted_validation_data_hash: make_persisted_validation_data_with_parent::< + Test, + >( + RELAY_PARENT_NUM, + Default::default(), + candidate_2.commitments.head_data, + ) + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![2]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate_3); + + let backed_3 = back_candidate( + candidate_3, + &validators, + group_validators(GroupIndex::from(4 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(4 as u32)), + ); + backed_candidates.push(backed_3.clone()); + } + + // Para 3 + { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(3), + relay_parent, + pov_hash: Hash::repeat_byte(6 as u8), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(3), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), + head_data: HeadData(vec![3, 3]), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![3]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let prev_candidate = candidate.clone(); + let backed: BackedCandidate = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(5 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(5 as u32)), + ); + backed_candidates.push(backed.clone()); + if core_index_enabled { + expected_backed_candidates_with_core + .entry(ParaId::from(3)) + 
.or_insert(vec![]) + .push((backed, CoreIndex(5))); + } + + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(3), + relay_parent, + pov_hash: Hash::repeat_byte(6 as u8), + persisted_validation_data_hash: make_persisted_validation_data_with_parent::< + Test, + >( + RELAY_PARENT_NUM, + Default::default(), + prev_candidate.commitments.head_data, + ) + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![3]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(6 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(6 as u32)), + ); + backed_candidates.push(backed.clone()); + if core_index_enabled { + expected_backed_candidates_with_core + .entry(ParaId::from(3)) + .or_insert(vec![]) + .push((backed, CoreIndex(6))); + } + } + + // Para 4 + { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(4), + relay_parent, + pov_hash: Hash::repeat_byte(8 as u8), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(4), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), + head_data: HeadData(vec![4]), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![4]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed: BackedCandidate = back_candidate( + candidate.clone(), + &validators, + group_validators(GroupIndex::from(7 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(7 as u32)), + ); + backed_candidates.push(backed.clone()); + if core_index_enabled { + expected_backed_candidates_with_core + .entry(ParaId::from(4)) + .or_insert(vec![]) + .push((backed, CoreIndex(7))); + } + + let backed: BackedCandidate = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(7 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(8 as u32)), + ); + backed_candidates.push(backed.clone()); + } + + // State sanity checks + assert_eq!( + >::scheduled_paras().collect::>(), + vec![ + (CoreIndex(0), ParaId::from(1)), + (CoreIndex(1), ParaId::from(1)), + (CoreIndex(2), ParaId::from(2)), + (CoreIndex(3), ParaId::from(2)), + (CoreIndex(4), ParaId::from(2)), + (CoreIndex(5), ParaId::from(3)), + (CoreIndex(6), ParaId::from(3)), + (CoreIndex(7), ParaId::from(4)), + (CoreIndex(8), ParaId::from(4)), + ] + ); + let mut scheduled: BTreeMap> = BTreeMap::new(); + for (core_idx, para_id) in >::scheduled_paras() { + scheduled.entry(para_id).or_default().insert(core_idx); + } + + assert_eq!( shared::Pallet::::active_validator_indices(), vec![ ValidatorIndex(0), @@ -1870,43 +2965,38 @@ mod sanitizers { ValidatorIndex(4), ValidatorIndex(5), ValidatorIndex(6), + ValidatorIndex(7), + ValidatorIndex(8), ] ); TestData { backed_candidates, scheduled_paras: scheduled, - all_backed_candidates_with_core, + expected_backed_candidates_with_core, } } #[rstest] #[case(false)] #[case(true)] - fn happy_path(#[case] core_index_enabled: bool) { - new_test_ext(MockGenesisConfig::default()).execute_with(|| { + fn happy_path_one_core_per_para(#[case] core_index_enabled: bool) { + new_test_ext(default_config()).execute_with(|| { let TestData { backed_candidates, - 
all_backed_candidates_with_core, + expected_backed_candidates_with_core, scheduled_paras: scheduled, - } = get_test_data(core_index_enabled); - - let has_concluded_invalid = - |_idx: usize, _backed_candidate: &BackedCandidate| -> bool { false }; + } = get_test_data_one_core_per_para(core_index_enabled); assert_eq!( - sanitize_backed_candidates::( + sanitize_backed_candidates::( backed_candidates.clone(), &>::allowed_relay_parents(), - has_concluded_invalid, + BTreeSet::new(), scheduled, core_index_enabled ), - SanitizedBackedCandidates { - backed_candidates_with_core: all_backed_candidates_with_core, - votes_from_disabled_were_dropped: false, - dropped_unscheduled_candidates: false - } + expected_backed_candidates_with_core, ); }); } @@ -1915,29 +3005,46 @@ mod sanitizers { #[case(false)] #[case(true)] fn test_with_multiple_cores_per_para(#[case] core_index_enabled: bool) { - new_test_ext(MockGenesisConfig::default()).execute_with(|| { + new_test_ext(default_config()).execute_with(|| { let TestData { backed_candidates, - all_backed_candidates_with_core: expected_all_backed_candidates_with_core, + expected_backed_candidates_with_core, scheduled_paras: scheduled, } = get_test_data_multiple_cores_per_para(core_index_enabled); - let has_concluded_invalid = - |_idx: usize, _backed_candidate: &BackedCandidate| -> bool { false }; - assert_eq!( - sanitize_backed_candidates::( + sanitize_backed_candidates::( backed_candidates.clone(), &>::allowed_relay_parents(), - has_concluded_invalid, + BTreeSet::new(), scheduled, core_index_enabled ), - SanitizedBackedCandidates { - backed_candidates_with_core: expected_all_backed_candidates_with_core, - votes_from_disabled_were_dropped: false, - dropped_unscheduled_candidates: true - } + expected_backed_candidates_with_core, + ); + }); + } + + #[rstest] + #[case(false)] + #[case(true)] + fn test_candidate_ordering(#[case] core_index_enabled: bool) { + new_test_ext(default_config()).execute_with(|| { + let TestData { + backed_candidates, + scheduled_paras: scheduled, + expected_backed_candidates_with_core, + } = get_test_data_for_order_checks(core_index_enabled); + + assert_eq!( + sanitize_backed_candidates::( + backed_candidates.clone(), + &>::allowed_relay_parents(), + BTreeSet::new(), + scheduled, + core_index_enabled, + ), + expected_backed_candidates_with_core ); }); } @@ -1952,31 +3059,23 @@ mod sanitizers { #[case] core_index_enabled: bool, #[case] multiple_cores_per_para: bool, ) { - new_test_ext(MockGenesisConfig::default()).execute_with(|| { + new_test_ext(default_config()).execute_with(|| { let TestData { backed_candidates, .. 
} = if multiple_cores_per_para { get_test_data_multiple_cores_per_para(core_index_enabled) } else { - get_test_data(core_index_enabled) + get_test_data_one_core_per_para(core_index_enabled) }; let scheduled = BTreeMap::new(); - let has_concluded_invalid = - |_idx: usize, _backed_candidate: &BackedCandidate| -> bool { false }; - - let SanitizedBackedCandidates { - backed_candidates_with_core: sanitized_backed_candidates, - votes_from_disabled_were_dropped, - dropped_unscheduled_candidates, - } = sanitize_backed_candidates::( + + let sanitized_backed_candidates = sanitize_backed_candidates::( backed_candidates.clone(), &>::allowed_relay_parents(), - has_concluded_invalid, + BTreeSet::new(), scheduled, core_index_enabled, ); assert!(sanitized_backed_candidates.is_empty()); - assert!(!votes_from_disabled_were_dropped); - assert!(dropped_unscheduled_candidates); }); } @@ -1984,14 +3083,16 @@ mod sanitizers { #[rstest] #[case(false)] #[case(true)] - fn invalid_are_filtered_out(#[case] core_index_enabled: bool) { - new_test_ext(MockGenesisConfig::default()).execute_with(|| { + fn concluded_invalid_are_filtered_out_single_core_per_para( + #[case] core_index_enabled: bool, + ) { + new_test_ext(default_config()).execute_with(|| { let TestData { backed_candidates, scheduled_paras: scheduled, .. } = - get_test_data(core_index_enabled); + get_test_data_one_core_per_para(core_index_enabled); // mark every second one as concluded invalid let set = { - let mut set = std::collections::HashSet::new(); + let mut set = std::collections::BTreeSet::new(); for (idx, backed_candidate) in backed_candidates.iter().enumerate() { if idx & 0x01 == 0 { set.insert(backed_candidate.hash()); @@ -1999,23 +3100,98 @@ mod sanitizers { } set }; - let has_concluded_invalid = - |_idx: usize, candidate: &BackedCandidate| set.contains(&candidate.hash()); - let SanitizedBackedCandidates { - backed_candidates_with_core: sanitized_backed_candidates, - votes_from_disabled_were_dropped, - dropped_unscheduled_candidates, - } = sanitize_backed_candidates::( + let sanitized_backed_candidates: BTreeMap< + ParaId, + Vec<(BackedCandidate<_>, CoreIndex)>, + > = sanitize_backed_candidates::( backed_candidates.clone(), &>::allowed_relay_parents(), - has_concluded_invalid, + set, scheduled, core_index_enabled, ); assert_eq!(sanitized_backed_candidates.len(), backed_candidates.len() / 2); - assert!(!votes_from_disabled_were_dropped); - assert!(!dropped_unscheduled_candidates); + }); + } + + // candidates that have concluded as invalid are filtered out, as well as their descendants. + #[test] + fn concluded_invalid_are_filtered_out_multiple_cores_per_para() { + // Mark the first candidate of paraid 1 as invalid. Its descendant should also + // be dropped. Also mark the candidate of paraid 3 as invalid. + new_test_ext(default_config()).execute_with(|| { + let TestData { + backed_candidates, + scheduled_paras: scheduled, + mut expected_backed_candidates_with_core, + .. 
+ } = get_test_data_multiple_cores_per_para(true);
+
+ let mut invalid_set = std::collections::BTreeSet::new();
+
+ for (idx, backed_candidate) in backed_candidates.iter().enumerate() {
+ if backed_candidate.descriptor().para_id == ParaId::from(1) && idx == 0 {
+ invalid_set.insert(backed_candidate.hash());
+ } else if backed_candidate.descriptor().para_id == ParaId::from(3) {
+ invalid_set.insert(backed_candidate.hash());
+ }
+ }
+ let sanitized_backed_candidates: BTreeMap<
+ ParaId,
+ Vec<(BackedCandidate<_>, CoreIndex)>,
+ > = sanitize_backed_candidates::<Test>(
+ backed_candidates.clone(),
+ &<shared::Pallet<Test>>::allowed_relay_parents(),
+ invalid_set,
+ scheduled,
+ true,
+ );
+
+ // We'll be left with candidates from paraid 2 and 4.
+
+ expected_backed_candidates_with_core.remove(&ParaId::from(1)).unwrap();
+ expected_backed_candidates_with_core.remove(&ParaId::from(3)).unwrap();
+
+ assert_eq!(sanitized_backed_candidates, expected_backed_candidates_with_core);
+ });
+
+ // Mark the second candidate of paraid 1 as invalid. Its predecessor should be left
+ // in place.
+ new_test_ext(default_config()).execute_with(|| {
+ let TestData {
+ backed_candidates,
+ scheduled_paras: scheduled,
+ mut expected_backed_candidates_with_core,
+ ..
+ } = get_test_data_multiple_cores_per_para(true);
+
+ let mut invalid_set = std::collections::BTreeSet::new();
+
+ for (idx, backed_candidate) in backed_candidates.iter().enumerate() {
+ if backed_candidate.descriptor().para_id == ParaId::from(1) && idx == 1 {
+ invalid_set.insert(backed_candidate.hash());
+ }
+ }
+ let sanitized_backed_candidates: BTreeMap<
+ ParaId,
+ Vec<(BackedCandidate<_>, CoreIndex)>,
+ > = sanitize_backed_candidates::<Test>(
+ backed_candidates.clone(),
+ &<shared::Pallet<Test>>::allowed_relay_parents(),
+ invalid_set,
+ scheduled,
+ true,
+ );
+
+ // Only the second candidate of paraid 1 should be removed.
+ expected_backed_candidates_with_core
+ .get_mut(&ParaId::from(1))
+ .unwrap()
+ .remove(1);
+
+ // We'll be left with candidates from paraid 1, 2, 3 and 4.
+ assert_eq!(sanitized_backed_candidates, expected_backed_candidates_with_core);
+ });
+ }
@@ -2023,34 +3199,35 @@ mod sanitizers {
 #[case(false)]
 #[case(true)]
 fn disabled_non_signing_validator_doesnt_get_filtered(#[case] core_index_enabled: bool) {
- new_test_ext(MockGenesisConfig::default()).execute_with(|| {
- let TestData { mut all_backed_candidates_with_core, .. } =
- get_test_data(core_index_enabled);
+ new_test_ext(default_config()).execute_with(|| {
+ let TestData { mut expected_backed_candidates_with_core, .. } =
+ get_test_data_one_core_per_para(core_index_enabled);

 // Disable Eve
 set_disabled_validators(vec![4]);

- let before = all_backed_candidates_with_core.clone();
+ let before = expected_backed_candidates_with_core.clone();

 // Eve is disabled but no backing statement is signed by it so nothing should be
 // filtered
- assert!(!filter_backed_statements_from_disabled_validators::<Test>(
- &mut all_backed_candidates_with_core,
+ filter_backed_statements_from_disabled_validators::<Test>(
+ &mut expected_backed_candidates_with_core,
 &<shared::Pallet<Test>>::allowed_relay_parents(),
- core_index_enabled
- ));
- assert_eq!(all_backed_candidates_with_core, before);
+ core_index_enabled,
+ );
+ assert_eq!(expected_backed_candidates_with_core, before);
 });
 }

+ #[rstest]
 #[case(false)]
 #[case(true)]
 fn drop_statements_from_disabled_without_dropping_candidate(
 #[case] core_index_enabled: bool,
 ) {
- new_test_ext(MockGenesisConfig::default()).execute_with(|| {
- let TestData { mut all_backed_candidates_with_core, .. } =
- get_test_data(core_index_enabled);
+ new_test_ext(default_config()).execute_with(|| {
+ let TestData { mut expected_backed_candidates_with_core, ..
} = - get_test_data(core_index_enabled); + new_test_ext(default_config()).execute_with(|| { + let TestData { mut expected_backed_candidates_with_core, .. } = + get_test_data_one_core_per_para(core_index_enabled); // Disable Alice set_disabled_validators(vec![0]); @@ -2064,11 +3241,22 @@ mod sanitizers { // Verify the initial state is as expected assert_eq!( - all_backed_candidates_with_core.get(0).unwrap().0.validity_votes().len(), + expected_backed_candidates_with_core + .get(&ParaId::from(1)) + .unwrap() + .iter() + .next() + .unwrap() + .0 + .validity_votes() + .len(), 2 ); - let (validator_indices, maybe_core_index) = all_backed_candidates_with_core - .get(0) + let (validator_indices, maybe_core_index) = expected_backed_candidates_with_core + .get(&ParaId::from(1)) + .unwrap() + .iter() + .next() .unwrap() .0 .validator_indices_and_core_index(core_index_enabled); @@ -2080,16 +3268,28 @@ mod sanitizers { assert_eq!(validator_indices.get(0).unwrap(), true); assert_eq!(validator_indices.get(1).unwrap(), true); - let untouched = all_backed_candidates_with_core.get(1).unwrap().0.clone(); + let untouched = expected_backed_candidates_with_core + .get(&ParaId::from(2)) + .unwrap() + .iter() + .next() + .unwrap() + .0 + .clone(); - assert!(filter_backed_statements_from_disabled_validators::( - &mut all_backed_candidates_with_core, + let before = expected_backed_candidates_with_core.clone(); + filter_backed_statements_from_disabled_validators::( + &mut expected_backed_candidates_with_core, &>::allowed_relay_parents(), - core_index_enabled - )); + core_index_enabled, + ); + assert_eq!(before.len(), expected_backed_candidates_with_core.len()); - let (validator_indices, maybe_core_index) = all_backed_candidates_with_core - .get(0) + let (validator_indices, maybe_core_index) = expected_backed_candidates_with_core + .get(&ParaId::from(1)) + .unwrap() + .iter() + .next() .unwrap() .0 .validator_indices_and_core_index(core_index_enabled); @@ -2100,47 +3300,137 @@ mod sanitizers { } // there should still be two backed candidates - assert_eq!(all_backed_candidates_with_core.len(), 2); + assert_eq!(expected_backed_candidates_with_core.len(), 2); // but the first one should have only one validity vote assert_eq!( - all_backed_candidates_with_core.get(0).unwrap().0.validity_votes().len(), + expected_backed_candidates_with_core + .get(&ParaId::from(1)) + .unwrap() + .iter() + .next() + .unwrap() + .0 + .validity_votes() + .len(), 1 ); // Validator 0 vote should be dropped, validator 1 - retained assert_eq!(validator_indices.get(0).unwrap(), false); assert_eq!(validator_indices.get(1).unwrap(), true); // the second candidate shouldn't be modified - assert_eq!(all_backed_candidates_with_core.get(1).unwrap().0, untouched); + assert_eq!( + expected_backed_candidates_with_core + .get(&ParaId::from(2)) + .unwrap() + .iter() + .next() + .unwrap() + .0, + untouched + ); }); } #[rstest] #[case(false)] #[case(true)] - fn drop_candidate_if_all_statements_are_from_disabled(#[case] core_index_enabled: bool) { - new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let TestData { mut all_backed_candidates_with_core, .. } = - get_test_data(core_index_enabled); + fn drop_candidate_if_all_statements_are_from_disabled_single_core_per_para( + #[case] core_index_enabled: bool, + ) { + new_test_ext(default_config()).execute_with(|| { + let TestData { mut expected_backed_candidates_with_core, .. 
} = + get_test_data_one_core_per_para(core_index_enabled); // Disable Alice and Bob set_disabled_validators(vec![0, 1]); // Verify the initial state is as expected assert_eq!( - all_backed_candidates_with_core.get(0).unwrap().0.validity_votes().len(), + expected_backed_candidates_with_core + .get(&ParaId::from(1)) + .unwrap() + .iter() + .next() + .unwrap() + .0 + .validity_votes() + .len(), 2 ); - let untouched = all_backed_candidates_with_core.get(1).unwrap().0.clone(); + let untouched = expected_backed_candidates_with_core + .get(&ParaId::from(2)) + .unwrap() + .iter() + .next() + .unwrap() + .0 + .clone(); - assert!(filter_backed_statements_from_disabled_validators::( - &mut all_backed_candidates_with_core, + filter_backed_statements_from_disabled_validators::( + &mut expected_backed_candidates_with_core, &>::allowed_relay_parents(), - core_index_enabled - )); + core_index_enabled, + ); + + assert_eq!(expected_backed_candidates_with_core.len(), 1); + assert_eq!( + expected_backed_candidates_with_core + .get(&ParaId::from(2)) + .unwrap() + .iter() + .next() + .unwrap() + .0, + untouched + ); + assert_eq!(expected_backed_candidates_with_core.get(&ParaId::from(1)), None); + }); + } + + #[test] + fn drop_candidate_if_all_statements_are_from_disabled_multiple_cores_per_para() { + // Disable Bob, only the second candidate of paraid 1 should be removed. + new_test_ext(default_config()).execute_with(|| { + let TestData { mut expected_backed_candidates_with_core, .. } = + get_test_data_multiple_cores_per_para(true); - assert_eq!(all_backed_candidates_with_core.len(), 1); - assert_eq!(all_backed_candidates_with_core.get(0).unwrap().0, untouched); + set_disabled_validators(vec![1]); + + let mut untouched = expected_backed_candidates_with_core.clone(); + + filter_backed_statements_from_disabled_validators::( + &mut expected_backed_candidates_with_core, + &>::allowed_relay_parents(), + true, + ); + + untouched.get_mut(&ParaId::from(1)).unwrap().remove(1); + + assert_eq!(expected_backed_candidates_with_core, untouched); }); + + // Disable Alice or disable both Alice and Bob, all candidates of paraid 1 should be + // removed. + for disabled in [vec![0], vec![0, 1]] { + new_test_ext(default_config()).execute_with(|| { + let TestData { mut expected_backed_candidates_with_core, .. 
} = + get_test_data_multiple_cores_per_para(true); + + set_disabled_validators(disabled); + + let mut untouched = expected_backed_candidates_with_core.clone(); + + filter_backed_statements_from_disabled_validators::( + &mut expected_backed_candidates_with_core, + &>::allowed_relay_parents(), + true, + ); + + untouched.remove(&ParaId::from(1)).unwrap(); + + assert_eq!(expected_backed_candidates_with_core, untouched); + }); + } } } } diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs b/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs index 1bbd4dfb716..171f3f746a8 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs @@ -22,6 +22,7 @@ use crate::{ scheduler::{self, CoreOccupied}, session_info, shared, }; +use frame_support::traits::{GetStorageVersion, StorageVersion}; use frame_system::pallet_prelude::*; use primitives::{ async_backing::{ @@ -92,16 +93,41 @@ pub fn availability_cores() -> Vec { - let pending_availability = - >::pending_availability(entry.para_id()) - .expect("Occupied core always has pending availability; qed"); - - let backed_in_number = *pending_availability.backed_in_number(); + // Due to https://github.com/paritytech/polkadot-sdk/issues/64, using the new storage types would cause + // this runtime API to panic. We explicitly handle the storage for version 0 to + // prevent that. When removing the inclusion v0 -> v1 migration, this bit of code + // can also be removed. + let pending_availability = if >::on_chain_storage_version() == + StorageVersion::new(0) + { + inclusion::migration::v0::PendingAvailability::::get(entry.para_id()) + .expect("Occupied core always has pending availability; qed") + } else { + let candidate = >::pending_availability_with_core( + entry.para_id(), + CoreIndex(i as u32), + ) + .expect("Occupied core always has pending availability; qed"); + + // Translate to the old candidate format, as we don't need the commitments now. + inclusion::migration::v0::CandidatePendingAvailability { + core: candidate.core_occupied(), + hash: candidate.candidate_hash(), + descriptor: candidate.candidate_descriptor().clone(), + availability_votes: candidate.availability_votes().clone(), + backers: candidate.backers().clone(), + relay_parent_number: candidate.relay_parent_number(), + backed_in_number: candidate.backed_in_number(), + backing_group: candidate.backing_group(), + } + }; + + let backed_in_number = pending_availability.backed_in_number; // Use the same block number for determining the responsible group as what the // backing subsystem would use when it calls validator_groups api. 
let backing_group_allocation_time = - pending_availability.relay_parent_number() + One::one(); + pending_availability.relay_parent_number + One::one(); CoreState::Occupied(OccupiedCore { next_up_on_available: >::next_up_on_available(CoreIndex( i as u32, @@ -111,13 +137,13 @@ pub fn availability_cores() -> Vec>::next_up_on_time_out(CoreIndex( i as u32, )), - availability: pending_availability.availability_votes().clone(), + availability: pending_availability.availability_votes.clone(), group_responsible: group_responsible_for( backing_group_allocation_time, - pending_availability.core_occupied(), + pending_availability.core, ), - candidate_hash: pending_availability.candidate_hash(), - candidate_descriptor: pending_availability.candidate_descriptor().clone(), + candidate_hash: pending_availability.hash, + candidate_descriptor: pending_availability.descriptor, }) }, CoreOccupied::Free => { @@ -200,8 +226,8 @@ pub fn assumed_validation_data( }; let persisted_validation_data = make_validation_data().or_else(|| { - // Try again with force enacting the core. This check only makes sense if - // the core is occupied. + // Try again with force enacting the pending candidates. This check only makes sense if + // there are any pending candidates. >::pending_availability(para_id).and_then(|_| { >::force_enact(para_id); make_validation_data() @@ -465,27 +491,23 @@ pub fn backing_state( }; let pending_availability = { - // Note: the API deals with a `Vec` as it is future-proof for cases - // where there may be multiple candidates pending availability at a time. - // But at the moment only one candidate can be pending availability per - // parachain. crate::inclusion::PendingAvailability::::get(¶_id) - .and_then(|pending| { - let commitments = - crate::inclusion::PendingAvailabilityCommitments::::get(¶_id); - commitments.map(move |c| (pending, c)) - }) - .map(|(pending, commitments)| { - CandidatePendingAvailability { - candidate_hash: pending.candidate_hash(), - descriptor: pending.candidate_descriptor().clone(), - commitments, - relay_parent_number: pending.relay_parent_number(), - max_pov_size: constraints.max_pov_size, // assume always same in session. - } + .map(|pending_candidates| { + pending_candidates + .into_iter() + .map(|candidate| { + CandidatePendingAvailability { + candidate_hash: candidate.candidate_hash(), + descriptor: candidate.candidate_descriptor().clone(), + commitments: candidate.candidate_commitments().clone(), + relay_parent_number: candidate.relay_parent_number(), + max_pov_size: constraints.max_pov_size, /* assume always same in + * session. */ + } + }) + .collect() }) - .into_iter() - .collect() + .unwrap_or_else(|| vec![]) }; Some(BackingState { constraints, pending_availability }) diff --git a/polkadot/runtime/parachains/src/scheduler.rs b/polkadot/runtime/parachains/src/scheduler.rs index f231864a85e..25840d9707d 100644 --- a/polkadot/runtime/parachains/src/scheduler.rs +++ b/polkadot/runtime/parachains/src/scheduler.rs @@ -398,16 +398,6 @@ impl Pallet { }); } - /// Get the para (chain or thread) ID assigned to a particular core or index, if any. Core - /// indices out of bounds will return `None`, as will indices of unassigned cores. 
-	pub(crate) fn core_para(core_index: CoreIndex) -> Option<ParaId> {
-		let cores = AvailabilityCores::<T>::get();
-		match cores.get(core_index.0 as usize) {
-			None | Some(CoreOccupied::Free) => None,
-			Some(CoreOccupied::Paras(entry)) => Some(entry.para_id()),
-		}
-	}
-
 	/// Get the validators in the given group, if the group index is valid for this session.
 	pub(crate) fn group_validators(group_index: GroupIndex) -> Option<Vec<ValidatorId>> {
 		ValidatorGroups::<T>::get().get(group_index.0 as usize).map(|g| g.clone())
diff --git a/polkadot/runtime/parachains/src/util.rs b/polkadot/runtime/parachains/src/util.rs
index aa07ef08005..493a9d055ef 100644
--- a/polkadot/runtime/parachains/src/util.rs
+++ b/polkadot/runtime/parachains/src/util.rs
@@ -18,7 +18,7 @@
 //! on all modules.
 
 use frame_system::pallet_prelude::BlockNumberFor;
-use primitives::{Id as ParaId, PersistedValidationData, ValidatorIndex};
+use primitives::{HeadData, Id as ParaId, PersistedValidationData, ValidatorIndex};
 use sp_std::{collections::btree_set::BTreeSet, vec::Vec};
 
 use crate::{configuration, hrmp, paras};
@@ -42,6 +42,23 @@ pub fn make_persisted_validation_data<T: paras::Config + hrmp::Config>(
 	})
 }
 
+/// Make the persisted validation data for a particular parachain, a specified relay-parent, its
+/// storage root and parent head data.
+pub fn make_persisted_validation_data_with_parent<T: configuration::Config>(
+	relay_parent_number: BlockNumberFor<T>,
+	relay_parent_storage_root: T::Hash,
+	parent_head: HeadData,
+) -> PersistedValidationData<T::Hash, BlockNumberFor<T>> {
+	let config = <configuration::Pallet<T>>::config();
+
+	PersistedValidationData {
+		parent_head,
+		relay_parent_number,
+		relay_parent_storage_root,
+		max_pov_size: config.max_pov_size,
+	}
+}
+
 /// Take an active subset of a set containing all validators.
 ///
 /// First item in pair will be all items in set have indices found in the `active` indices set (in
diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs
index c9f5d81d286..90824a2f6f0 100644
--- a/polkadot/runtime/rococo/src/lib.rs
+++ b/polkadot/runtime/rococo/src/lib.rs
@@ -1663,6 +1663,8 @@ pub mod migrations {
 
 		// permanent
 		pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
+
+		parachains_inclusion::migration::MigrateToV1<Runtime>,
 	);
 }
 
diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs
index a102d1903b2..c250c86665b 100644
--- a/polkadot/runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs
+++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs
@@ -13,161 +13,322 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
 
+
 //! Autogenerated weights for `runtime_parachains::paras_inherent`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2021-11-20, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
-//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 128
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-03-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024
 
 // Executed Command:
-// target/release/polkadot
+// target/production/polkadot
 // benchmark
-// --chain=rococo-dev
+// pallet
 // --steps=50
 // --repeat=20
-// --pallet=runtime_parachains::paras_inherent
 // --extrinsic=*
-// --execution=wasm
 // --wasm-execution=compiled
 // --heap-pages=4096
-// --output=./runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs
-// --header=./file_header.txt
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=runtime_parachains::paras_inherent
+// --chain=rococo-dev
+// --header=./polkadot/file_header.txt
+// --output=./polkadot/runtime/rococo/src/weights/
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
 #![allow(unused_imports)]
+#![allow(missing_docs)]
 
 use frame_support::{traits::Get, weights::Weight};
-use sp_std::marker::PhantomData;
+use core::marker::PhantomData;
 
 /// Weight functions for `runtime_parachains::paras_inherent`.
 pub struct WeightInfo<T>(PhantomData<T>);
 impl<T: frame_system::Config> runtime_parachains::paras_inherent::WeightInfo for WeightInfo<T> {
-	// Storage: ParaInherent Included (r:1 w:1)
-	// Storage: System ParentHash (r:1 w:0)
-	// Storage: ParaScheduler AvailabilityCores (r:1 w:1)
-	// Storage: ParasShared CurrentSessionIndex (r:1 w:0)
-	// Storage: Configuration ActiveConfig (r:1 w:0)
-	// Storage: ParaSessionInfo Sessions (r:1 w:0)
-	// Storage: ParasDisputes Disputes (r:1 w:1)
-	// Storage: ParasDisputes Included (r:1 w:1)
-	// Storage: ParasDisputes SpamSlots (r:1 w:1)
-	// Storage: ParasDisputes Frozen (r:1 w:0)
-	// Storage: ParaInclusion PendingAvailability (r:2 w:1)
-	// Storage: ParasShared ActiveValidatorKeys (r:1 w:0)
-	// Storage: Paras Parachains (r:1 w:0)
-	// Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1)
-	// Storage: Dmp DownwardMessageQueues (r:1 w:1)
-	// Storage: Hrmp HrmpChannelDigests (r:1 w:1)
-	// Storage: Paras FutureCodeUpgrades (r:1 w:0)
-	// Storage: ParaScheduler SessionStartBlock (r:1 w:0)
-	// Storage: ParaScheduler ParathreadQueue (r:1 w:1)
-	// Storage: ParaScheduler Scheduled (r:1 w:1)
-	// Storage: ParaScheduler ValidatorGroups (r:1 w:0)
-	// Storage: Ump NeedsDispatch (r:1 w:1)
-	// Storage: Ump NextDispatchRoundStartWith (r:1 w:1)
-	// Storage: ParaInherent OnChainVotes (r:0 w:1)
-	// Storage: Hrmp HrmpWatermarks (r:0 w:1)
-	// Storage: Paras Heads (r:0 w:1)
+	/// Storage: `ParaInherent::Included` (r:1 w:1)
+	/// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `System::ParentHash` (r:1 w:0)
+	/// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
+	/// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1)
+	/// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0)
+	/// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1)
+	/// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0)
+	/// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0)
+	/// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`)
+	/// Storage: `ParaSessionInfo::Sessions` (r:1 w:0)
+	/// Proof: `ParaSessionInfo::Sessions` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasDisputes::Disputes` (r:1 w:1)
+	/// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasDisputes::BackersOnDisputes` (r:1 w:1)
+	/// Proof: `ParasDisputes::BackersOnDisputes` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasDisputes::Included` (r:1 w:1)
+	/// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaInherent::OnChainVotes` (r:1 w:1)
+	/// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasDisputes::Frozen` (r:1 w:0)
+	/// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaInclusion::V1` (r:2 w:1)
+	/// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1)
+	/// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1)
+	/// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1)
+	/// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0)
+	/// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Registrar::Paras` (r:1 w:0)
+	/// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0)
+	/// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0)
+	/// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1)
+	/// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1)
+	/// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0)
+	/// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Session::DisabledValidators` (r:1 w:0)
+	/// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1)
+	/// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::Heads` (r:0 w:1)
+	/// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1)
+	/// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::MostRecentContext` (r:0 w:1)
+	/// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// The range of component `v` is `[10, 200]`.
 	fn enter_variable_disputes(v: u32, ) -> Weight {
-		Weight::from_parts(352_590_000 as u64, 0)
-			// Standard Error: 13_000
-			.saturating_add(Weight::from_parts(49_254_000 as u64, 0).saturating_mul(v as u64))
-			.saturating_add(T::DbWeight::get().reads(24 as u64))
-			.saturating_add(T::DbWeight::get().writes(16 as u64))
+		// Proof Size summary in bytes:
+		//  Measured:  `67785`
+		//  Estimated: `73725 + v * (23 ±0)`
+		// Minimum execution time: 949_716_000 picoseconds.
+		Weight::from_parts(482_361_515, 0)
+			.saturating_add(Weight::from_parts(0, 73725))
+			// Standard Error: 17_471
+			.saturating_add(Weight::from_parts(50_100_764, 0).saturating_mul(v.into()))
+			.saturating_add(T::DbWeight::get().reads(25))
+			.saturating_add(T::DbWeight::get().writes(15))
+			.saturating_add(Weight::from_parts(0, 23).saturating_mul(v.into()))
 	}
-	// Storage: ParaInherent Included (r:1 w:1)
-	// Storage: System ParentHash (r:1 w:0)
-	// Storage: ParaScheduler AvailabilityCores (r:1 w:1)
-	// Storage: ParasShared CurrentSessionIndex (r:1 w:0)
-	// Storage: Configuration ActiveConfig (r:1 w:0)
-	// Storage: ParasDisputes Frozen (r:1 w:0)
-	// Storage: ParasShared ActiveValidatorKeys (r:1 w:0)
-	// Storage: Paras Parachains (r:1 w:0)
-	// Storage: ParaInclusion PendingAvailability (r:2 w:1)
-	// Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1)
-	// Storage: Dmp DownwardMessageQueues (r:1 w:1)
-	// Storage: Hrmp HrmpChannelDigests (r:1 w:1)
-	// Storage: Paras FutureCodeUpgrades (r:1 w:0)
-	// Storage: ParasDisputes Disputes (r:1 w:0)
-	// Storage: ParaScheduler SessionStartBlock (r:1 w:0)
-	// Storage: ParaScheduler ParathreadQueue (r:1 w:1)
-	// Storage: ParaScheduler Scheduled (r:1 w:1)
-	// Storage: ParaScheduler ValidatorGroups (r:1 w:0)
-	// Storage: Ump NeedsDispatch (r:1 w:1)
-	// Storage: Ump NextDispatchRoundStartWith (r:1 w:1)
-	// Storage: ParaInclusion AvailabilityBitfields (r:0 w:1)
-	// Storage: ParaInherent OnChainVotes (r:0 w:1)
-	// Storage: ParasDisputes Included (r:0 w:1)
-	// Storage: Hrmp HrmpWatermarks (r:0 w:1)
-	// Storage: Paras Heads (r:0 w:1)
+	/// Storage: `ParaInherent::Included` (r:1 w:1)
+	/// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `System::ParentHash` (r:1 w:0)
+	/// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
+	/// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1)
+	/// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0)
+	/// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1)
+	/// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0)
+	/// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0)
+	/// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`)
+	/// Storage: `ParaInherent::OnChainVotes` (r:1 w:1)
+	/// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasDisputes::Frozen` (r:1 w:0)
+	/// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaInclusion::V1` (r:2 w:1)
+	/// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1)
+	/// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1)
+	/// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1)
+	/// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0)
+	/// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Registrar::Paras` (r:1 w:0)
+	/// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasDisputes::Disputes` (r:1 w:0)
+	/// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0)
+	/// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0)
+	/// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1)
+	/// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1)
+	/// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0)
+	/// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Session::DisabledValidators` (r:1 w:0)
+	/// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasDisputes::Included` (r:0 w:1)
+	/// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1)
+	/// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::Heads` (r:0 w:1)
+	/// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1)
+	/// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::MostRecentContext` (r:0 w:1)
+	/// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	fn enter_bitfields() -> Weight {
-		Weight::from_parts(299_878_000 as u64, 0)
-			.saturating_add(T::DbWeight::get().reads(21 as u64))
-			.saturating_add(T::DbWeight::get().writes(15 as u64))
+		// Proof Size summary in bytes:
+		//  Measured:  `42757`
+		//  Estimated: `48697`
+		// Minimum execution time: 437_627_000 picoseconds.
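+		// `enter_bitfields` has no scaling component: base weight plus DB reads/writes is the whole cost.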
+		Weight::from_parts(460_975_000, 0)
+			.saturating_add(Weight::from_parts(0, 48697))
+			.saturating_add(T::DbWeight::get().reads(23))
+			.saturating_add(T::DbWeight::get().writes(15))
 	}
-	// Storage: ParaInherent Included (r:1 w:1)
-	// Storage: System ParentHash (r:1 w:0)
-	// Storage: ParaScheduler AvailabilityCores (r:1 w:1)
-	// Storage: ParasShared CurrentSessionIndex (r:1 w:0)
-	// Storage: Configuration ActiveConfig (r:1 w:0)
-	// Storage: ParasDisputes Frozen (r:1 w:0)
-	// Storage: ParasShared ActiveValidatorKeys (r:1 w:0)
-	// Storage: Paras Parachains (r:1 w:0)
-	// Storage: ParaInclusion PendingAvailability (r:2 w:1)
-	// Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1)
-	// Storage: Dmp DownwardMessageQueues (r:1 w:1)
-	// Storage: Hrmp HrmpChannelDigests (r:1 w:1)
-	// Storage: Paras FutureCodeUpgrades (r:1 w:0)
-	// Storage: ParasDisputes Disputes (r:2 w:0)
-	// Storage: ParaScheduler SessionStartBlock (r:1 w:0)
-	// Storage: ParaScheduler ParathreadQueue (r:1 w:1)
-	// Storage: ParaScheduler Scheduled (r:1 w:1)
-	// Storage: ParaScheduler ValidatorGroups (r:1 w:0)
-	// Storage: Paras PastCodeMeta (r:1 w:0)
-	// Storage: Paras CurrentCodeHash (r:1 w:0)
-	// Storage: Ump RelayDispatchQueueSize (r:1 w:0)
-	// Storage: Ump NeedsDispatch (r:1 w:1)
-	// Storage: Ump NextDispatchRoundStartWith (r:1 w:1)
-	// Storage: ParaInherent OnChainVotes (r:0 w:1)
-	// Storage: ParasDisputes Included (r:0 w:1)
-	// Storage: Hrmp HrmpWatermarks (r:0 w:1)
-	// Storage: Paras Heads (r:0 w:1)
-	fn enter_backed_candidates_variable(_v: u32) -> Weight {
-		Weight::from_parts(442_472_000 as u64, 0)
-			.saturating_add(T::DbWeight::get().reads(25 as u64))
-			.saturating_add(T::DbWeight::get().writes(14 as u64))
+	/// Storage: `ParaInherent::Included` (r:1 w:1)
+	/// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `System::ParentHash` (r:1 w:0)
+	/// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
+	/// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1)
+	/// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0)
+	/// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1)
+	/// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0)
+	/// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0)
+	/// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`)
+	/// Storage: `ParaInherent::OnChainVotes` (r:1 w:1)
+	/// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasDisputes::Frozen` (r:1 w:0)
+	/// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaInclusion::V1` (r:2 w:1)
+	/// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1)
+	/// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1)
+	/// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1)
+	/// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0)
+	/// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Registrar::Paras` (r:1 w:0)
+	/// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasDisputes::Disputes` (r:1 w:0)
+	/// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0)
+	/// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0)
+	/// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1)
+	/// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1)
+	/// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::CurrentCodeHash` (r:1 w:0)
+	/// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::ParaLifecycles` (r:1 w:0)
+	/// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `MessageQueue::BookStateFor` (r:1 w:0)
+	/// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`)
+	/// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0)
+	/// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Session::DisabledValidators` (r:1 w:0)
+	/// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasDisputes::Included` (r:0 w:1)
+	/// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1)
+	/// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::Heads` (r:0 w:1)
+	/// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1)
+	/// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::MostRecentContext` (r:0 w:1)
+	/// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// The range of component `v` is `[101, 200]`.
+	fn enter_backed_candidates_variable(v: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `42829`
+		//  Estimated: `48769`
+		// Minimum execution time: 1_305_254_000 picoseconds.
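+		// The `v`-scaled term below is small relative to the base weight over the benchmarked range `[101, 200]`.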
+		Weight::from_parts(1_347_160_667, 0)
+			.saturating_add(Weight::from_parts(0, 48769))
+			// Standard Error: 22_128
+			.saturating_add(Weight::from_parts(57_229, 0).saturating_mul(v.into()))
+			.saturating_add(T::DbWeight::get().reads(26))
+			.saturating_add(T::DbWeight::get().writes(15))
 	}
-	// Storage: ParaInherent Included (r:1 w:1)
-	// Storage: System ParentHash (r:1 w:0)
-	// Storage: ParaScheduler AvailabilityCores (r:1 w:1)
-	// Storage: ParasShared CurrentSessionIndex (r:1 w:0)
-	// Storage: Configuration ActiveConfig (r:1 w:0)
-	// Storage: ParasDisputes Frozen (r:1 w:0)
-	// Storage: ParasShared ActiveValidatorKeys (r:1 w:0)
-	// Storage: Paras Parachains (r:1 w:0)
-	// Storage: ParaInclusion PendingAvailability (r:2 w:1)
-	// Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1)
-	// Storage: Dmp DownwardMessageQueues (r:1 w:1)
-	// Storage: Hrmp HrmpChannelDigests (r:1 w:1)
-	// Storage: Paras FutureCodeUpgrades (r:1 w:0)
-	// Storage: ParasDisputes Disputes (r:2 w:0)
-	// Storage: ParaScheduler SessionStartBlock (r:1 w:0)
-	// Storage: ParaScheduler ParathreadQueue (r:1 w:1)
-	// Storage: ParaScheduler Scheduled (r:1 w:1)
-	// Storage: ParaScheduler ValidatorGroups (r:1 w:0)
-	// Storage: Paras PastCodeMeta (r:1 w:0)
-	// Storage: Paras CurrentCodeHash (r:1 w:0)
-	// Storage: Ump RelayDispatchQueueSize (r:1 w:0)
-	// Storage: Ump NeedsDispatch (r:1 w:1)
-	// Storage: Ump NextDispatchRoundStartWith (r:1 w:1)
-	// Storage: ParaInherent OnChainVotes (r:0 w:1)
-	// Storage: ParasDisputes Included (r:0 w:1)
-	// Storage: Hrmp HrmpWatermarks (r:0 w:1)
-	// Storage: Paras Heads (r:0 w:1)
+	/// Storage: `ParaInherent::Included` (r:1 w:1)
+	/// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `System::ParentHash` (r:1 w:0)
+	/// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
+	/// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1)
+	/// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0)
+	/// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1)
+	/// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0)
+	/// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0)
+	/// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`)
+	/// Storage: `ParaInherent::OnChainVotes` (r:1 w:1)
+	/// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasDisputes::Frozen` (r:1 w:0)
+	/// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaInclusion::V1` (r:2 w:1)
+	/// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1)
+	/// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1)
+	/// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1)
+	/// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0)
+	/// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Registrar::Paras` (r:1 w:0)
+	/// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasDisputes::Disputes` (r:1 w:0)
+	/// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0)
+	/// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0)
+	/// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1)
+	/// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1)
+	/// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::CurrentCodeHash` (r:1 w:0)
+	/// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::FutureCodeHash` (r:1 w:0)
+	/// Proof: `Paras::FutureCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::UpgradeRestrictionSignal` (r:1 w:0)
+	/// Proof: `Paras::UpgradeRestrictionSignal` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::ParaLifecycles` (r:1 w:0)
+	/// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `MessageQueue::BookStateFor` (r:1 w:0)
+	/// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`)
+	/// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0)
+	/// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Session::DisabledValidators` (r:1 w:0)
+	/// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasDisputes::Included` (r:0 w:1)
+	/// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1)
+	/// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::Heads` (r:0 w:1)
+	/// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1)
+	/// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::MostRecentContext` (r:0 w:1)
+	/// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	fn enter_backed_candidate_code_upgrade() -> Weight {
-		Weight::from_parts(36_903_411_000 as u64, 0)
-			.saturating_add(T::DbWeight::get().reads(25 as u64))
-			.saturating_add(T::DbWeight::get().writes(14 as u64))
+		// Proof Size summary in bytes:
+		//  Measured:  `42842`
+		//  Estimated: `48782`
+		// Minimum execution time: 38_637_547_000 picoseconds.
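+		// Code upgrades dominate this path: the base weight below is ~41 ms, versus ~0.46 ms for `enter_bitfields`.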
+		Weight::from_parts(41_447_412_000, 0)
+			.saturating_add(Weight::from_parts(0, 48782))
+			.saturating_add(T::DbWeight::get().reads(28))
+			.saturating_add(T::DbWeight::get().writes(15))
 	}
 }
diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs
index 83d47508c7c..664044b713e 100644
--- a/polkadot/runtime/westend/src/lib.rs
+++ b/polkadot/runtime/westend/src/lib.rs
@@ -1781,6 +1781,7 @@ pub mod migrations {
 			crate::xcm_config::XcmRouter,
 			GetLegacyLeaseImpl,
 		>,
+		parachains_inclusion::migration::MigrateToV1<Runtime>,
 	);
 }
 
diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_paras_inherent.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_paras_inherent.rs
index 0dd64f054d0..aa99ac9438c 100644
--- a/polkadot/runtime/westend/src/weights/runtime_parachains_paras_inherent.rs
+++ b/polkadot/runtime/westend/src/weights/runtime_parachains_paras_inherent.rs
@@ -16,11 +16,11 @@
 
 //! Autogenerated weights for `runtime_parachains::paras_inherent`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-06-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-03-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-xerhrdyb-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
-//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024
+//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024
 
 // Executed Command:
 // target/production/polkadot
@@ -29,14 +29,13 @@
 // --steps=50
 // --repeat=20
 // --extrinsic=*
-// --execution=wasm
 // --wasm-execution=compiled
 // --heap-pages=4096
-// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
 // --pallet=runtime_parachains::paras_inherent
 // --chain=westend-dev
-// --header=./file_header.txt
-// --output=./runtime/westend/src/weights/
+// --header=./polkadot/file_header.txt
+// --output=./polkadot/runtime/westend/src/weights/
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
 #![allow(unused_imports)]
@@ -49,297 +48,311 @@ use core::marker::PhantomData;
 
 /// Weight functions for `runtime_parachains::paras_inherent`.
 pub struct WeightInfo<T>(PhantomData<T>);
 impl<T: frame_system::Config> runtime_parachains::paras_inherent::WeightInfo for WeightInfo<T> {
-	/// Storage: ParaInherent Included (r:1 w:1)
-	/// Proof Skipped: ParaInherent Included (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: System ParentHash (r:1 w:0)
-	/// Proof: System ParentHash (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen)
-	/// Storage: ParasShared CurrentSessionIndex (r:1 w:0)
-	/// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: ParaScheduler AvailabilityCores (r:1 w:1)
-	/// Proof Skipped: ParaScheduler AvailabilityCores (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: ParasShared ActiveValidatorKeys (r:1 w:0)
-	/// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: Babe AuthorVrfRandomness (r:1 w:0)
-	/// Proof: Babe AuthorVrfRandomness (max_values: Some(1), max_size: Some(33), added: 528, mode: MaxEncodedLen)
-	/// Storage: ParaSessionInfo Sessions (r:1 w:0)
-	/// Proof Skipped: ParaSessionInfo Sessions (max_values: None, max_size: None, mode: Measured)
-	/// Storage: ParasDisputes Disputes (r:1 w:1)
-	/// Proof Skipped: ParasDisputes Disputes (max_values: None, max_size: None, mode: Measured)
-	/// Storage: ParasDisputes BackersOnDisputes (r:1 w:1)
-	/// Proof Skipped: ParasDisputes BackersOnDisputes (max_values: None, max_size: None, mode: Measured)
-	/// Storage: ParasDisputes Included (r:1 w:1)
-	/// Proof Skipped: ParasDisputes Included (max_values: None, max_size: None, mode: Measured)
-	/// Storage: ParaSessionInfo AccountKeys (r:1 w:0)
-	/// Proof Skipped: ParaSessionInfo AccountKeys (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Session Validators (r:1 w:0)
-	/// Proof Skipped: Session Validators (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: Staking ActiveEra (r:1 w:0)
-	/// Proof: Staking ActiveEra (max_values: Some(1), max_size: Some(13), added: 508, mode: MaxEncodedLen)
-	/// Storage: Staking ErasRewardPoints (r:1 w:1)
-	/// Proof Skipped: Staking ErasRewardPoints (max_values: None, max_size: None, mode: Measured)
-	/// Storage: ParaInherent OnChainVotes (r:1 w:1)
-	/// Proof Skipped: ParaInherent OnChainVotes (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: ParasDisputes Frozen (r:1 w:0)
-	/// Proof Skipped: ParasDisputes Frozen (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: ParaInclusion PendingAvailability (r:2 w:1)
-	/// Proof Skipped: ParaInclusion PendingAvailability (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Paras Parachains (r:1 w:0)
-	/// Proof Skipped: Paras Parachains (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1)
-	/// Proof Skipped: ParaInclusion PendingAvailabilityCommitments (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Dmp DownwardMessageQueues (r:1 w:1)
-	/// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Dmp DeliveryFeeFactor (r:1 w:1)
-	/// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpChannelDigests (r:1 w:1)
-	/// Proof Skipped: Hrmp HrmpChannelDigests (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Paras FutureCodeUpgrades (r:1 w:0)
-	/// Proof Skipped: Paras FutureCodeUpgrades (max_values: None, max_size: None, mode: Measured)
-	/// Storage: ParaScheduler SessionStartBlock (r:1 w:0)
-	/// Proof Skipped: ParaScheduler SessionStartBlock (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: ParaScheduler ParathreadQueue (r:1 w:1)
-	/// Proof Skipped: ParaScheduler ParathreadQueue (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: ParaScheduler Scheduled (r:1 w:1)
-	/// Proof Skipped: ParaScheduler Scheduled (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: ParaScheduler ValidatorGroups (r:1 w:0)
-	/// Proof Skipped: ParaScheduler ValidatorGroups (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpWatermarks (r:0 w:1)
-	/// Proof Skipped: Hrmp HrmpWatermarks (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Paras Heads (r:0 w:1)
-	/// Proof Skipped: Paras Heads (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Paras UpgradeGoAheadSignal (r:0 w:1)
-	/// Proof Skipped: Paras UpgradeGoAheadSignal (max_values: None, max_size: None, mode: Measured)
+	/// Storage: `ParaInherent::Included` (r:1 w:1)
+	/// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `System::ParentHash` (r:1 w:0)
+	/// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
+	/// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1)
+	/// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0)
+	/// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1)
+	/// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0)
+	/// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0)
+	/// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`)
+	/// Storage: `ParaSessionInfo::Sessions` (r:1 w:0)
+	/// Proof: `ParaSessionInfo::Sessions` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasDisputes::Disputes` (r:1 w:1)
+	/// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasDisputes::BackersOnDisputes` (r:1 w:1)
+	/// Proof: `ParasDisputes::BackersOnDisputes` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasDisputes::Included` (r:1 w:1)
+	/// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaSessionInfo::AccountKeys` (r:1 w:0)
+	/// Proof: `ParaSessionInfo::AccountKeys` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Session::Validators` (r:1 w:0)
+	/// Proof: `Session::Validators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Staking::ActiveEra` (r:1 w:0)
+	/// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::ErasRewardPoints` (r:1 w:1)
+	/// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaInherent::OnChainVotes` (r:1 w:1)
+	/// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasDisputes::Frozen` (r:1 w:0)
+	/// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaInclusion::V1` (r:2 w:1)
+	/// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1)
+	/// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1)
+	/// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1)
+	/// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0)
+	/// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0)
+	/// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0)
+	/// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1)
+	/// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1)
+	/// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0)
+	/// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Session::DisabledValidators` (r:1 w:0)
+	/// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1)
+	/// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::Heads` (r:0 w:1)
+	/// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1)
+	/// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::MostRecentContext` (r:0 w:1)
+	/// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// The range of component `v` is `[10, 200]`.
 	fn enter_variable_disputes(v: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `50518`
-		//  Estimated: `56458 + v * (23 ±0)`
-		// Minimum execution time: 998_338_000 picoseconds.
-		Weight::from_parts(468_412_001, 0)
-			.saturating_add(Weight::from_parts(0, 56458))
-			// Standard Error: 20_559
-			.saturating_add(Weight::from_parts(56_965_025, 0).saturating_mul(v.into()))
-			.saturating_add(T::DbWeight::get().reads(27))
-			.saturating_add(T::DbWeight::get().writes(15))
+		//  Measured:  `67518`
+		//  Estimated: `73458 + v * (23 ±0)`
+		// Minimum execution time: 844_022_000 picoseconds.
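+		// Base weight; total cost grows linearly in `v` through the `saturating_mul` term below.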
+		Weight::from_parts(456_682_337, 0)
+			.saturating_add(Weight::from_parts(0, 73458))
+			// Standard Error: 16_403
+			.saturating_add(Weight::from_parts(41_871_245, 0).saturating_mul(v.into()))
+			.saturating_add(T::DbWeight::get().reads(28))
+			.saturating_add(T::DbWeight::get().writes(16))
 			.saturating_add(Weight::from_parts(0, 23).saturating_mul(v.into()))
 	}
-	/// Storage: ParaInherent Included (r:1 w:1)
-	/// Proof Skipped: ParaInherent Included (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: System ParentHash (r:1 w:0)
-	/// Proof: System ParentHash (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen)
-	/// Storage: ParasShared CurrentSessionIndex (r:1 w:0)
-	/// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: ParaScheduler AvailabilityCores (r:1 w:1)
-	/// Proof Skipped: ParaScheduler AvailabilityCores (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: ParasShared ActiveValidatorKeys (r:1 w:0)
-	/// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: Babe AuthorVrfRandomness (r:1 w:0)
-	/// Proof: Babe AuthorVrfRandomness (max_values: Some(1), max_size: Some(33), added: 528, mode: MaxEncodedLen)
-	/// Storage: ParaInherent OnChainVotes (r:1 w:1)
-	/// Proof Skipped: ParaInherent OnChainVotes (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: ParasDisputes Frozen (r:1 w:0)
-	/// Proof Skipped: ParasDisputes Frozen (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: ParaInclusion PendingAvailability (r:2 w:1)
-	/// Proof Skipped: ParaInclusion PendingAvailability (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Paras Parachains (r:1 w:0)
-	/// Proof Skipped: Paras Parachains (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1)
-	/// Proof Skipped: ParaInclusion PendingAvailabilityCommitments (max_values: None, max_size: None, mode: Measured)
-	/// Storage: ParaSessionInfo AccountKeys (r:1 w:0)
-	/// Proof Skipped: ParaSessionInfo AccountKeys (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Session Validators (r:1 w:0)
-	/// Proof Skipped: Session Validators (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: Staking ActiveEra (r:1 w:0)
-	/// Proof: Staking ActiveEra (max_values: Some(1), max_size: Some(13), added: 508, mode: MaxEncodedLen)
-	/// Storage: Staking ErasRewardPoints (r:1 w:1)
-	/// Proof Skipped: Staking ErasRewardPoints (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Dmp DownwardMessageQueues (r:1 w:1)
-	/// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Dmp DeliveryFeeFactor (r:1 w:1)
-	/// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpChannelDigests (r:1 w:1)
-	/// Proof Skipped: Hrmp HrmpChannelDigests (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Paras FutureCodeUpgrades (r:1 w:0)
-	/// Proof Skipped: Paras FutureCodeUpgrades (max_values: None, max_size: None, mode: Measured)
-	/// Storage: ParasDisputes Disputes (r:1 w:0)
-	/// Proof Skipped: ParasDisputes Disputes (max_values: None, max_size: None, mode: Measured)
-	/// Storage: ParaScheduler SessionStartBlock (r:1 w:0)
-	/// Proof Skipped: ParaScheduler SessionStartBlock (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: ParaScheduler ParathreadQueue (r:1 w:1)
-	/// Proof Skipped: ParaScheduler ParathreadQueue (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: ParaScheduler Scheduled (r:1 w:1)
-	/// Proof Skipped: ParaScheduler Scheduled (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: ParaScheduler ValidatorGroups (r:1 w:0)
-	/// Proof Skipped: ParaScheduler ValidatorGroups (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: ParaInclusion AvailabilityBitfields (r:0 w:1)
-	/// Proof Skipped: ParaInclusion AvailabilityBitfields (max_values: None, max_size: None, mode: Measured)
-	/// Storage: ParasDisputes Included (r:0 w:1)
-	/// Proof Skipped: ParasDisputes Included (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpWatermarks (r:0 w:1)
-	/// Proof Skipped: Hrmp HrmpWatermarks (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Paras Heads (r:0 w:1)
-	/// Proof Skipped: Paras Heads (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Paras UpgradeGoAheadSignal (r:0 w:1)
-	/// Proof Skipped: Paras UpgradeGoAheadSignal (max_values: None, max_size: None, mode: Measured)
+	/// Storage: `ParaInherent::Included` (r:1 w:1)
+	/// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `System::ParentHash` (r:1 w:0)
+	/// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
+	/// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1)
+	/// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0)
+	/// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1)
+	/// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0)
+	/// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0)
+	/// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`)
+	/// Storage: `ParaInherent::OnChainVotes` (r:1 w:1)
+	/// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasDisputes::Frozen` (r:1 w:0)
+	/// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaInclusion::V1` (r:2 w:1)
+	/// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaSessionInfo::AccountKeys` (r:1 w:0)
+	/// Proof: `ParaSessionInfo::AccountKeys` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Session::Validators` (r:1 w:0)
+	/// Proof: `Session::Validators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Staking::ActiveEra` (r:1 w:0)
+	/// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::ErasRewardPoints` (r:1 w:1)
+	/// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1)
+	/// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1)
+	/// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1)
+	/// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0)
+	/// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasDisputes::Disputes` (r:1 w:0)
+	/// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0)
+	/// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0)
+	/// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1)
+	/// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1)
+	/// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0)
+	/// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Session::DisabledValidators` (r:1 w:0)
+	/// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasDisputes::Included` (r:0 w:1)
+	/// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1)
+	/// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::Heads` (r:0 w:1)
+	/// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1)
+	/// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::MostRecentContext` (r:0 w:1)
+	/// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	fn enter_bitfields() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `42352`
-		//  Estimated: `48292`
-		// Minimum execution time: 457_404_000 picoseconds.
-		Weight::from_parts(485_416_000, 0)
-			.saturating_add(Weight::from_parts(0, 48292))
-			.saturating_add(T::DbWeight::get().reads(25))
+		//  Measured:  `43196`
+		//  Estimated: `49136`
+		// Minimum execution time: 438_637_000 picoseconds.
+		Weight::from_parts(458_342_000, 0)
+			.saturating_add(Weight::from_parts(0, 49136))
+			.saturating_add(T::DbWeight::get().reads(26))
 			.saturating_add(T::DbWeight::get().writes(16))
 	}
-	/// Storage: ParaInherent Included (r:1 w:1)
-	/// Proof Skipped: ParaInherent Included (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: System ParentHash (r:1 w:0)
-	/// Proof: System ParentHash (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen)
-	/// Storage: ParasShared CurrentSessionIndex (r:1 w:0)
-	/// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: ParaScheduler AvailabilityCores (r:1 w:1)
-	/// Proof Skipped: ParaScheduler AvailabilityCores (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: ParasShared ActiveValidatorKeys (r:1 w:0)
-	/// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: Babe AuthorVrfRandomness (r:1 w:0)
-	/// Proof: Babe AuthorVrfRandomness (max_values: Some(1), max_size: Some(33), added: 528, mode: MaxEncodedLen)
-	/// Storage: ParaInherent OnChainVotes (r:1 w:1)
-	/// Proof Skipped: ParaInherent OnChainVotes (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: ParasDisputes Frozen (r:1 w:0)
-	/// Proof Skipped: ParasDisputes Frozen (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: ParaInclusion PendingAvailability (r:2 w:1)
-	/// Proof Skipped: ParaInclusion PendingAvailability (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Paras Parachains (r:1 w:0)
-	/// Proof Skipped: Paras Parachains (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1)
-	/// Proof Skipped: ParaInclusion PendingAvailabilityCommitments (max_values: None, max_size: None, mode: Measured)
-	/// Storage: ParaSessionInfo AccountKeys (r:1 w:0)
-	/// Proof Skipped: ParaSessionInfo AccountKeys (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Session Validators (r:1 w:0)
-	/// Proof Skipped: Session Validators (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: Staking ActiveEra (r:1 w:0)
-	/// Proof: Staking ActiveEra (max_values: Some(1), max_size: Some(13), added: 508, mode: MaxEncodedLen)
-	/// Storage: Staking ErasRewardPoints (r:1 w:1)
-	/// Proof Skipped: Staking ErasRewardPoints (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Dmp DownwardMessageQueues (r:1 w:1)
-	/// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Dmp DeliveryFeeFactor (r:1 w:1)
-	/// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpChannelDigests (r:1 w:1)
-	/// Proof Skipped: Hrmp HrmpChannelDigests (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Paras FutureCodeUpgrades (r:1 w:0)
-	/// Proof Skipped: Paras FutureCodeUpgrades (max_values: None, max_size: None, mode: Measured)
-	/// Storage: ParasDisputes Disputes (r:1 w:0)
-	/// Proof Skipped: ParasDisputes Disputes (max_values: None, max_size: None, mode: Measured)
-	/// Storage: ParaScheduler SessionStartBlock (r:1 w:0)
-	/// Proof Skipped: ParaScheduler SessionStartBlock (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: ParaScheduler ParathreadQueue (r:1 w:1)
-	/// Proof Skipped: ParaScheduler ParathreadQueue (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: ParaScheduler Scheduled (r:1 w:1)
-	/// Proof Skipped: ParaScheduler Scheduled (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: ParaScheduler ValidatorGroups (r:1 w:0)
-	/// Proof Skipped: ParaScheduler ValidatorGroups (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: Paras CurrentCodeHash (r:1 w:0)
-	/// Proof Skipped: Paras CurrentCodeHash (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Paras ParaLifecycles (r:1 w:0)
-	/// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured)
-	/// Storage: MessageQueue BookStateFor (r:1 w:0)
-	/// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen)
-	/// Storage: ParasDisputes Included (r:0 w:1)
-	/// Proof Skipped: ParasDisputes Included (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpWatermarks (r:0 w:1)
-	/// Proof Skipped: Hrmp HrmpWatermarks (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Paras Heads (r:0 w:1)
-	/// Proof Skipped: Paras Heads (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Paras UpgradeGoAheadSignal (r:0 w:1)
-	/// Proof Skipped: Paras UpgradeGoAheadSignal (max_values: None, max_size: None, mode: Measured)
+	/// Storage: `ParaInherent::Included` (r:1 w:1)
+	/// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `System::ParentHash` (r:1 w:0)
+	/// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
+	/// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1)
+	/// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0)
+	/// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1)
+	/// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0)
+	/// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0)
+	/// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`)
+	/// Storage: `ParaInherent::OnChainVotes` (r:1 w:1)
+	/// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasDisputes::Frozen` (r:1 w:0)
+	/// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaInclusion::V1` (r:2 w:1)
+	/// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaSessionInfo::AccountKeys` (r:1 w:0)
+	/// Proof: `ParaSessionInfo::AccountKeys` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Session::Validators` (r:1 w:0)
+	/// Proof: `Session::Validators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Staking::ActiveEra` (r:1 w:0)
+	/// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::ErasRewardPoints` (r:1 w:1)
+	/// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1)
+	/// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1)
+	/// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1)
+	/// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0)
+	/// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasDisputes::Disputes` (r:1 w:0)
+	/// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0)
+	/// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0)
+	/// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1)
+	/// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1)
+	/// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::CurrentCodeHash` (r:1 w:0)
+	/// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::FutureCodeHash` (r:1 w:0)
+	/// Proof: `Paras::FutureCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::UpgradeRestrictionSignal` (r:1 w:0)
+	/// Proof: `Paras::UpgradeRestrictionSignal` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::ParaLifecycles` (r:1 w:0)
+	/// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `MessageQueue::BookStateFor` (r:1 w:0)
+	/// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`)
+	/// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0)
+	/// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Session::DisabledValidators` (r:1 w:0)
+	/// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParasDisputes::Included` (r:0 w:1)
+	/// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1)
+	/// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::Heads` (r:0 w:1)
+	/// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1)
+	/// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::MostRecentContext` (r:0 w:1)
+	/// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// The range of component `v` is `[101, 200]`.
 	fn enter_backed_candidates_variable(v: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `42387`
-		//  Estimated: `48327`
-		// Minimum execution time: 6_864_029_000 picoseconds.
-		Weight::from_parts(1_237_704_892, 0)
-			.saturating_add(Weight::from_parts(0, 48327))
-			// Standard Error: 33_413
-			.saturating_add(Weight::from_parts(56_199_819, 0).saturating_mul(v.into()))
-			.saturating_add(T::DbWeight::get().reads(28))
-			.saturating_add(T::DbWeight::get().writes(15))
+		//  Measured:  `43269`
+		//  Estimated: `49209`
+		// Minimum execution time: 5_955_361_000 picoseconds.
+		Weight::from_parts(1_285_398_956, 0)
+			.saturating_add(Weight::from_parts(0, 49209))
+			// Standard Error: 57_369
+			.saturating_add(Weight::from_parts(47_073_853, 0).saturating_mul(v.into()))
+			.saturating_add(T::DbWeight::get().reads(29))
+			.saturating_add(T::DbWeight::get().writes(16))
 	}
-	/// Storage: ParaInherent Included (r:1 w:1)
-	/// Proof Skipped: ParaInherent Included (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: System ParentHash (r:1 w:0)
-	/// Proof: System ParentHash (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen)
-	/// Storage: ParasShared CurrentSessionIndex (r:1 w:0)
-	/// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: ParaScheduler AvailabilityCores (r:1 w:1)
-	/// Proof Skipped: ParaScheduler AvailabilityCores (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: ParasShared ActiveValidatorKeys (r:1 w:0)
-	/// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: Babe AuthorVrfRandomness (r:1 w:0)
-	/// Proof: Babe AuthorVrfRandomness (max_values: Some(1), max_size: Some(33), added: 528, mode: MaxEncodedLen)
-	/// Storage: ParaInherent OnChainVotes (r:1 w:1)
-	/// Proof Skipped: ParaInherent OnChainVotes (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: ParasDisputes Frozen (r:1 w:0)
-	/// Proof Skipped: ParasDisputes Frozen (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: ParaInclusion PendingAvailability (r:2 w:1)
-	/// Proof Skipped: ParaInclusion PendingAvailability (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Paras Parachains (r:1 w:0)
-	/// Proof Skipped: Paras Parachains (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1)
-	/// Proof Skipped: ParaInclusion PendingAvailabilityCommitments (max_values: None, max_size: None, mode: Measured)
-	/// Storage: ParaSessionInfo AccountKeys (r:1 w:0)
-	/// Proof Skipped: ParaSessionInfo AccountKeys (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Session Validators (r:1 w:0)
-	/// Proof Skipped: Session Validators (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: Staking ActiveEra (r:1 w:0)
-	/// Proof: Staking ActiveEra (max_values: Some(1), max_size: Some(13), added: 508, mode: MaxEncodedLen)
-	/// Storage: Staking ErasRewardPoints (r:1 w:1)
-	/// Proof Skipped: Staking ErasRewardPoints (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Dmp DownwardMessageQueues (r:1 w:1)
-	/// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Dmp DeliveryFeeFactor (r:1 w:1)
-	/// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpChannelDigests (r:1 w:1)
-	/// Proof Skipped: Hrmp HrmpChannelDigests (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Paras FutureCodeUpgrades (r:1 w:0)
-	/// Proof Skipped: Paras FutureCodeUpgrades (max_values: None, max_size: None, mode: Measured)
-	/// Storage: ParasDisputes Disputes (r:1 w:0)
-	/// Proof Skipped: ParasDisputes Disputes (max_values: None, max_size: None, mode: Measured)
-	/// Storage: ParaScheduler SessionStartBlock (r:1 w:0)
-	/// Proof Skipped: ParaScheduler SessionStartBlock (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: ParaScheduler ParathreadQueue (r:1 w:1)
-
/// Proof Skipped: ParaScheduler ParathreadQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler Scheduled (r:1 w:1) - /// Proof Skipped: ParaScheduler Scheduled (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler ValidatorGroups (r:1 w:0) - /// Proof Skipped: ParaScheduler ValidatorGroups (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras CurrentCodeHash (r:1 w:0) - /// Proof Skipped: Paras CurrentCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras FutureCodeHash (r:1 w:0) - /// Proof Skipped: Paras FutureCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeRestrictionSignal (r:1 w:0) - /// Proof Skipped: Paras UpgradeRestrictionSignal (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:1 w:0) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) - /// Storage: ParasDisputes Included (r:0 w:1) - /// Proof Skipped: ParasDisputes Included (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpWatermarks (r:0 w:1) - /// Proof Skipped: Hrmp HrmpWatermarks (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras Heads (r:0 w:1) - /// Proof Skipped: Paras Heads (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeGoAheadSignal (r:0 w:1) - /// Proof Skipped: Paras UpgradeGoAheadSignal (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) + /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::V1` (r:2 w:1) + /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaSessionInfo::AccountKeys` (r:1 w:0) + /// Proof: `ParaSessionInfo::AccountKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::Validators` (r:1 w:0) + /// Proof: `Session::Validators` (`max_values`: 
Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ActiveEra` (r:1 w:0) + /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasRewardPoints` (r:1 w:1) + /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:0) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeHash` (r:1 w:0) + /// Proof: `Paras::FutureCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeRestrictionSignal` (r:1 w:0) + /// Proof: `Paras::UpgradeRestrictionSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:0 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: 
None, `max_size`: None, mode: `Measured`) fn enter_backed_candidate_code_upgrade() -> Weight { // Proof Size summary in bytes: - // Measured: `42414` - // Estimated: `48354` - // Minimum execution time: 43_320_529_000 picoseconds. - Weight::from_parts(45_622_613_000, 0) - .saturating_add(Weight::from_parts(0, 48354)) - .saturating_add(T::DbWeight::get().reads(30)) - .saturating_add(T::DbWeight::get().writes(15)) + // Measured: `43282` + // Estimated: `49222` + // Minimum execution time: 42_128_606_000 picoseconds. + Weight::from_parts(42_822_806_000, 0) + .saturating_add(Weight::from_parts(0, 49222)) + .saturating_add(T::DbWeight::get().reads(31)) + .saturating_add(T::DbWeight::get().writes(16)) } } diff --git a/prdoc/pr_3479.prdoc b/prdoc/pr_3479.prdoc new file mode 100644 index 00000000000..1e44ce5646b --- /dev/null +++ b/prdoc/pr_3479.prdoc @@ -0,0 +1,8 @@ +title: "Elastic scaling: runtime dependency tracking and enactment" + +doc: + - audience: Node Dev + description: | + Adds support in the inclusion and paras_inherent runtime modules for backing and including multiple candidates of the same para if they form a chain. + +crates: [ ] -- GitLab From 64a707a4824ad12eb9ab715966dbd0d2b0efaf01 Mon Sep 17 00:00:00 2001 From: ordian Date: Thu, 21 Mar 2024 12:22:16 +0100 Subject: [PATCH 011/128] approval-voting: remove some inefficiences on startup (#3747) Small refactoring to reduce the algorithmic complexity of the initial message distribution in approval voting after a sync from O(n_candidates ^ 2) to O(n_candidates). --- polkadot/node/core/approval-voting/src/lib.rs | 274 +++++++++--------- 1 file changed, 129 insertions(+), 145 deletions(-) diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs index 8cc16a6e1ec..1d7ab3eee21 100644 --- a/polkadot/node/core/approval-voting/src/lib.rs +++ b/polkadot/node/core/approval-voting/src/lib.rs @@ -55,9 +55,9 @@ use polkadot_node_subsystem_util::{ }; use polkadot_primitives::{ vstaging::{ApprovalVoteMultipleCandidates, ApprovalVotingParams}, - BlockNumber, CandidateHash, CandidateIndex, CandidateReceipt, DisputeStatement, ExecutorParams, - GroupIndex, Hash, PvfExecKind, SessionIndex, SessionInfo, ValidDisputeStatementKind, - ValidatorId, ValidatorIndex, ValidatorPair, ValidatorSignature, + BlockNumber, CandidateHash, CandidateIndex, CandidateReceipt, CoreIndex, DisputeStatement, + ExecutorParams, GroupIndex, Hash, PvfExecKind, SessionIndex, SessionInfo, + ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorPair, ValidatorSignature, }; use sc_keystore::LocalKeystore; use sp_application_crypto::Pair; @@ -1285,10 +1285,10 @@ fn cores_to_candidate_indices( // Map from core index to candidate index. for claimed_core_index in core_indices.iter_ones() { - if let Some(candidate_index) = block_entry + // Candidates are sorted by core index. + if let Ok(candidate_index) = block_entry .candidates() - .iter() - .position(|(core_index, _)| core_index.0 == claimed_core_index as u32) + .binary_search_by_key(&(claimed_core_index as u32), |(core_index, _)| core_index.0) { candidate_indices.push(candidate_index as _); } @@ -1297,6 +1297,21 @@ fn cores_to_candidate_indices( CandidateBitfield::try_from(candidate_indices) } +// Returns the claimed core bitfield from the assignment cert and the core index +// from the block entry. 
+fn get_core_indices_on_startup( + assignment: &AssignmentCertKindV2, + block_entry_core_index: CoreIndex, +) -> CoreBitfield { + match &assignment { + AssignmentCertKindV2::RelayVRFModuloCompact { core_bitfield } => core_bitfield.clone(), + AssignmentCertKindV2::RelayVRFModulo { sample: _ } => + CoreBitfield::try_from(vec![block_entry_core_index]).expect("Not an empty vec; qed"), + AssignmentCertKindV2::RelayVRFDelay { core_index } => + CoreBitfield::try_from(vec![*core_index]).expect("Not an empty vec; qed"), + } +} + // Returns the claimed core bitfield from the assignment cert, the candidate hash and a // `BlockEntry`. Can fail only for VRF Delay assignments for which we cannot find the candidate hash // in the block entry which indicates a bug or corrupted storage. @@ -1367,7 +1382,7 @@ async fn distribution_messages_for_activation( session: block_entry.session(), }); let mut signatures_queued = HashSet::new(); - for (_, candidate_hash) in block_entry.candidates() { + for (core_index, candidate_hash) in block_entry.candidates() { let _candidate_span = distribution_message_span.child("candidate").with_candidate(*candidate_hash); let candidate_entry = match db.load_candidate_entry(&candidate_hash)? { @@ -1389,152 +1404,121 @@ async fn distribution_messages_for_activation( match approval_entry.local_statements() { (None, None) | (None, Some(_)) => {}, // second is impossible case. (Some(assignment), None) => { - if let Some(claimed_core_indices) = get_assignment_core_indices( - &assignment.cert().kind, - &candidate_hash, - &block_entry, - ) { - if block_entry.has_candidates_pending_signature() { - delayed_approvals_timers.maybe_arm_timer( - state.clock.tick_now(), - state.clock.as_ref(), - block_entry.block_hash(), - assignment.validator_index(), - ) - } + let claimed_core_indices = + get_core_indices_on_startup(&assignment.cert().kind, *core_index); + + if block_entry.has_candidates_pending_signature() { + delayed_approvals_timers.maybe_arm_timer( + state.clock.tick_now(), + state.clock.as_ref(), + block_entry.block_hash(), + assignment.validator_index(), + ) + } - match cores_to_candidate_indices( - &claimed_core_indices, - &block_entry, - ) { - Ok(bitfield) => { - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?candidate_entry.candidate_receipt().hash(), - ?block_hash, - "Discovered, triggered assignment, not approved yet", - ); - - let indirect_cert = IndirectAssignmentCertV2 { - block_hash, - validator: assignment.validator_index(), - cert: assignment.cert().clone(), - }; - messages.push( - ApprovalDistributionMessage::DistributeAssignment( - indirect_cert.clone(), - bitfield.clone(), - ), - ); - - if !block_entry - .candidate_is_pending_signature(*candidate_hash) - { - let ExtendedSessionInfo { ref executor_params, .. } = - match get_extended_session_info( - session_info_provider, - ctx.sender(), - block_entry.block_hash(), - block_entry.session(), - ) - .await - { - Some(i) => i, - None => continue, - }; - - actions.push(Action::LaunchApproval { - claimed_candidate_indices: bitfield, - candidate_hash: candidate_entry - .candidate_receipt() - .hash(), - indirect_cert, - assignment_tranche: assignment.tranche(), - relay_block_hash: block_hash, - session: block_entry.session(), - executor_params: executor_params.clone(), - candidate: candidate_entry - .candidate_receipt() - .clone(), - backing_group: approval_entry.backing_group(), - distribute_assignment: false, - }); - } - }, - Err(err) => { - // Should never happen. 
If we fail here it means the - // assignment is null (no cores claimed). - gum::warn!( - target: LOG_TARGET, - ?block_hash, - ?candidate_hash, - ?err, - "Failed to create assignment bitfield", - ); - }, - } - } else { - gum::warn!( - target: LOG_TARGET, - ?block_hash, - ?candidate_hash, - "Cannot get assignment claimed core indices", - ); + match cores_to_candidate_indices(&claimed_core_indices, &block_entry) { + Ok(bitfield) => { + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?candidate_entry.candidate_receipt().hash(), + ?block_hash, + "Discovered, triggered assignment, not approved yet", + ); + + let indirect_cert = IndirectAssignmentCertV2 { + block_hash, + validator: assignment.validator_index(), + cert: assignment.cert().clone(), + }; + messages.push( + ApprovalDistributionMessage::DistributeAssignment( + indirect_cert.clone(), + bitfield.clone(), + ), + ); + + if !block_entry.candidate_is_pending_signature(*candidate_hash) + { + let ExtendedSessionInfo { ref executor_params, .. } = + match get_extended_session_info( + session_info_provider, + ctx.sender(), + block_entry.block_hash(), + block_entry.session(), + ) + .await + { + Some(i) => i, + None => continue, + }; + + actions.push(Action::LaunchApproval { + claimed_candidate_indices: bitfield, + candidate_hash: candidate_entry + .candidate_receipt() + .hash(), + indirect_cert, + assignment_tranche: assignment.tranche(), + relay_block_hash: block_hash, + session: block_entry.session(), + executor_params: executor_params.clone(), + candidate: candidate_entry.candidate_receipt().clone(), + backing_group: approval_entry.backing_group(), + distribute_assignment: false, + }); + } + }, + Err(err) => { + // Should never happen. If we fail here it means the + // assignment is null (no cores claimed). + gum::warn!( + target: LOG_TARGET, + ?block_hash, + ?candidate_hash, + ?err, + "Failed to create assignment bitfield", + ); + }, } }, (Some(assignment), Some(approval_sig)) => { - if let Some(claimed_core_indices) = get_assignment_core_indices( - &assignment.cert().kind, - &candidate_hash, - &block_entry, - ) { - match cores_to_candidate_indices( - &claimed_core_indices, - &block_entry, - ) { - Ok(bitfield) => messages.push( - ApprovalDistributionMessage::DistributeAssignment( - IndirectAssignmentCertV2 { - block_hash, - validator: assignment.validator_index(), - cert: assignment.cert().clone(), - }, - bitfield, - ), - ), - Err(err) => { - gum::warn!( - target: LOG_TARGET, - ?block_hash, - ?candidate_hash, - ?err, - "Failed to create assignment bitfield", - ); - // If we didn't send assignment, we don't send approval. 
- continue - }, - } - if signatures_queued - .insert(approval_sig.signed_candidates_indices.clone()) - { - messages.push(ApprovalDistributionMessage::DistributeApproval( - IndirectSignedApprovalVoteV2 { + let claimed_core_indices = + get_core_indices_on_startup(&assignment.cert().kind, *core_index); + match cores_to_candidate_indices(&claimed_core_indices, &block_entry) { + Ok(bitfield) => messages.push( + ApprovalDistributionMessage::DistributeAssignment( + IndirectAssignmentCertV2 { block_hash, - candidate_indices: approval_sig - .signed_candidates_indices, validator: assignment.validator_index(), - signature: approval_sig.signature, + cert: assignment.cert().clone(), }, - )) - }; - } else { - gum::warn!( - target: LOG_TARGET, - ?block_hash, - ?candidate_hash, - "Cannot get assignment claimed core indices", - ); + bitfield, + ), + ), + Err(err) => { + gum::warn!( + target: LOG_TARGET, + ?block_hash, + ?candidate_hash, + ?err, + "Failed to create assignment bitfield", + ); + // If we didn't send assignment, we don't send approval. + continue + }, } + if signatures_queued + .insert(approval_sig.signed_candidates_indices.clone()) + { + messages.push(ApprovalDistributionMessage::DistributeApproval( + IndirectSignedApprovalVoteV2 { + block_hash, + candidate_indices: approval_sig.signed_candidates_indices, + validator: assignment.validator_index(), + signature: approval_sig.signature, + }, + )) + }; }, } }, -- GitLab From 9922fd39437cea5797564145bfd1adb989fd57d1 Mon Sep 17 00:00:00 2001 From: kvalerio <24193167+kevin-valerio@users.noreply.github.com> Date: Thu, 21 Mar 2024 13:18:41 +0100 Subject: [PATCH 012/128] there's a typo (#3779) There was a typo, so now, there's no more typo. Co-authored-by: Liam Aharon --- substrate/primitives/runtime/src/multiaddress.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substrate/primitives/runtime/src/multiaddress.rs b/substrate/primitives/runtime/src/multiaddress.rs index 89b0a3bcf8c..c435606312e 100644 --- a/substrate/primitives/runtime/src/multiaddress.rs +++ b/substrate/primitives/runtime/src/multiaddress.rs @@ -32,7 +32,7 @@ pub enum MultiAddress { Raw(Vec), /// It's a 32 byte representation. Address32([u8; 32]), - /// Its a 20 byte representation. + /// It's a 20 byte representation. Address20([u8; 20]), } -- GitLab From 46ba85500ffc77fa8e267c5f38b2c213550d68fa Mon Sep 17 00:00:00 2001 From: Tsvetomir Dimitrov Date: Thu, 21 Mar 2024 14:46:55 +0200 Subject: [PATCH 013/128] Fix toml formatting (#3782) Make taplo happy --- substrate/primitives/arithmetic/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substrate/primitives/arithmetic/Cargo.toml b/substrate/primitives/arithmetic/Cargo.toml index 64c1025b585..120edd06a66 100644 --- a/substrate/primitives/arithmetic/Cargo.toml +++ b/substrate/primitives/arithmetic/Cargo.toml @@ -43,7 +43,7 @@ std = [ "scale-info/std", "serde/std", "sp-crypto-hashing/std", - "sp-std/std", + "sp-std/std", ] # Serde support without relying on std features. 
serde = ["dep:serde", "scale-info/serde"] -- GitLab From 01d65f6b995021bfe7be184581895dc374285f06 Mon Sep 17 00:00:00 2001 From: Alejandro Martinez Andres <11448715+al3mart@users.noreply.github.com> Date: Thu, 21 Mar 2024 17:49:23 +0100 Subject: [PATCH 014/128] Revert `SendXcmOrigin` in Rococo & Westend (#2571) Based on issue [#2512](https://github.com/paritytech/polkadot-sdk/issues/2512), it seems that some ecosystem teams are using these networks to set up their staging environments and test certain use cases, some of them involving sending XCMs from the relay with origins not allowed in the current configuration. This change reverts the configuration of `SendXcmOrigin`. --------- Co-authored-by: Adrian Catangiu --- polkadot/runtime/rococo/src/xcm_config.rs | 8 +++----- polkadot/runtime/westend/src/xcm_config.rs | 4 +++- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/polkadot/runtime/rococo/src/xcm_config.rs b/polkadot/runtime/rococo/src/xcm_config.rs index 328879715de..c7063bd7ad6 100644 --- a/polkadot/runtime/rococo/src/xcm_config.rs +++ b/polkadot/runtime/rococo/src/xcm_config.rs @@ -259,11 +259,9 @@ pub type LocalPalletOriginToLocation = ( impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; - // We only allow the root, fellows and the staking admin to send messages. - // This is basically safe to enable for everyone (safe the possibility of someone spamming the - // parachain if they're willing to pay the KSM to send from the Relay-chain), but it's useless - // until we bring in XCM v3 which will make `DescendOrigin` a bit more useful. - type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; + // Note that this configuration of `SendXcmOrigin` is different from the one present in + // production. + type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; type XcmRouter = XcmRouter; // Anyone can execute XCM messages locally. type ExecuteXcmOrigin = xcm_builder::EnsureXcmOrigin; diff --git a/polkadot/runtime/westend/src/xcm_config.rs b/polkadot/runtime/westend/src/xcm_config.rs index 73127cb1efd..400843c5835 100644 --- a/polkadot/runtime/westend/src/xcm_config.rs +++ b/polkadot/runtime/westend/src/xcm_config.rs @@ -272,7 +272,9 @@ pub type LocalPalletOriginToLocation = ( impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; + // Note that this configuration of `SendXcmOrigin` is different from the one present in + // production. + type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; type XcmRouter = XcmRouter; // Anyone can execute XCM messages locally... type ExecuteXcmOrigin = xcm_builder::EnsureXcmOrigin; -- GitLab From ea5f4e9a4d78234eb68bbb9130a24b0598c7893a Mon Sep 17 00:00:00 2001 From: "Will | Paradox | ParaNodes.io" <79228812+paradox-tt@users.noreply.github.com> Date: Fri, 22 Mar 2024 00:07:24 +0000 Subject: [PATCH 015/128] Adding LF's bootnodes to relay and system chains (#3514) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Good day, I'm seeking to add the following bootnodes for Kusama and Polkadot's relay and system chains. The following commands can be used to test connectivity. All node keys are backed up. 
Polkadot: ``` polkadot --chain polkadot --base-path /tmp/node --name "Boot" --reserved-only --reserved-nodes "/dns/boot-polkadot.luckyfriday.io/tcp/443/wss/p2p/12D3KooWAdyiVAaeGdtBt6vn5zVetwA4z4qfm9Fi2QCSykN1wTBJ" --no-hardware-benchmarks ``` Assethub-Polkadot: ``` polkadot-parachain --chain asset-hub-polkadot --base-path /tmp/node --name "Boot" --reserved-only --reserved-nodes "/dns/boot-polkadot-assethub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWDR9M7CjV1xdjCRbRwkFn1E7sjMaL4oYxGyDWxuLrFc2J" --no-hardware-benchmarks ``` Bridgehub-Polkadot: ``` polkadot-parachain --chain bridge-hub-polkadot --base-path /tmp/node --name "Boot" --reserved-only --reserved-nodes "/dns/boot-polkadot-bridgehub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWKf3mBXHjLbwtPqv1BdbQuwbFNcQQYxASS7iQ25264AXH" --no-hardware-benchmarks ``` Collectives-Polkadot ``` polkadot-parachain --chain collectives-polkadot --base-path /tmp/node --name "Boot" --reserved-only --reserved-nodes "/dns/boot-polkadot-collectives.luckyfriday.io/tcp/443/wss/p2p/12D3KooWCzifnPooTt4kvTnXT7FTKTymVL7xn7DURQLsS2AKpf6w" --no-hardware-benchmarks ``` Kusama: ``` polkadot --chain kusama --base-path /tmp/node --name "Boot" --reserved-only --reserved-nodes "/dns/boot-kusama.luckyfriday.io/tcp/443/wss/p2p/12D3KooWS1Lu6DmK8YHSvkErpxpcXmk14vG6y4KVEFEkd9g62PP8" --no-hardware-benchmarks ``` Assethub-Kusama: ``` polkadot-parachain --chain asset-hub-kusama --base-path /tmp/node --name "Boot" --reserved-only --reserved-nodes "/dns/boot-kusama-assethub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWSwaeFs6FNgpgh54fdoxSDAA4nJNaPE3PAcse2GRrG7b3" --no-hardware-benchmarks ``` Bridgehub-Kusama: ``` polkadot-parachain --chain bridge-hub-kusama --base-path /tmp/node --name "Boot" --reserved-only --reserved-nodes "/dns/boot-kusama-bridgehub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWQybw6AFmAvrFfwUQnNxUpS12RovapD6oorh2mAJr4xyd" --no-hardware-benchmarks ``` Co-authored-by: Bastian Köcher --- cumulus/parachains/chain-specs/asset-hub-kusama.json | 3 ++- cumulus/parachains/chain-specs/asset-hub-polkadot.json | 3 ++- cumulus/parachains/chain-specs/bridge-hub-kusama.json | 3 ++- cumulus/parachains/chain-specs/bridge-hub-polkadot.json | 3 ++- cumulus/parachains/chain-specs/collectives-polkadot.json | 3 ++- polkadot/node/service/chain-specs/kusama.json | 1 + polkadot/node/service/chain-specs/polkadot.json | 3 ++- 7 files changed, 13 insertions(+), 6 deletions(-) diff --git a/cumulus/parachains/chain-specs/asset-hub-kusama.json b/cumulus/parachains/chain-specs/asset-hub-kusama.json index 654275eade8..66a705a4086 100644 --- a/cumulus/parachains/chain-specs/asset-hub-kusama.json +++ b/cumulus/parachains/chain-specs/asset-hub-kusama.json @@ -27,7 +27,8 @@ "/dns/mine14.rotko.net/tcp/34524/ws/p2p/12D3KooWJUFnjR2PNbsJhudwPVaWCoZy1acPGKjM2cSuGj345BBu", "/dns/mine14.rotko.net/tcp/35524/wss/p2p/12D3KooWJUFnjR2PNbsJhudwPVaWCoZy1acPGKjM2cSuGj345BBu", "/dns/asset-hub-kusama.bootnodes.polkadotters.com/tcp/30511/p2p/12D3KooWDpk7wVH7RgjErEvbvAZ2kY5VeaAwRJP5ojmn1e8b8UbU", - "/dns/asset-hub-kusama.bootnodes.polkadotters.com/tcp/30513/wss/p2p/12D3KooWDpk7wVH7RgjErEvbvAZ2kY5VeaAwRJP5ojmn1e8b8UbU" + "/dns/asset-hub-kusama.bootnodes.polkadotters.com/tcp/30513/wss/p2p/12D3KooWDpk7wVH7RgjErEvbvAZ2kY5VeaAwRJP5ojmn1e8b8UbU", + "/dns/boot-kusama-assethub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWSwaeFs6FNgpgh54fdoxSDAA4nJNaPE3PAcse2GRrG7b3" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/asset-hub-polkadot.json b/cumulus/parachains/chain-specs/asset-hub-polkadot.json index 
454060d2a87..16caa52ba91 100644 --- a/cumulus/parachains/chain-specs/asset-hub-polkadot.json +++ b/cumulus/parachains/chain-specs/asset-hub-polkadot.json @@ -27,7 +27,8 @@ "/dns/mint14.rotko.net/tcp/34514/ws/p2p/12D3KooWKkzLjYF6M5eEs7nYiqEtRqY8SGVouoCwo3nCWsRnThDW", "/dns/mint14.rotko.net/tcp/35514/wss/p2p/12D3KooWKkzLjYF6M5eEs7nYiqEtRqY8SGVouoCwo3nCWsRnThDW", "/dns/asset-hub-polkadot.bootnodes.polkadotters.com/tcp/30508/p2p/12D3KooWKbfY9a9oywxMJKiALmt7yhrdQkjXMtvxhhDDN23vG93R", - "/dns/asset-hub-polkadot.bootnodes.polkadotters.com/tcp/30510/wss/p2p/12D3KooWKbfY9a9oywxMJKiALmt7yhrdQkjXMtvxhhDDN23vG93R" + "/dns/asset-hub-polkadot.bootnodes.polkadotters.com/tcp/30510/wss/p2p/12D3KooWKbfY9a9oywxMJKiALmt7yhrdQkjXMtvxhhDDN23vG93R", + "/dns/boot-polkadot-assethub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWDR9M7CjV1xdjCRbRwkFn1E7sjMaL4oYxGyDWxuLrFc2J" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-kusama.json b/cumulus/parachains/chain-specs/bridge-hub-kusama.json index 90b70b05016..6644ea41ab7 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-kusama.json +++ b/cumulus/parachains/chain-specs/bridge-hub-kusama.json @@ -27,7 +27,8 @@ "/dns/kbr13.rotko.net/tcp/34553/ws/p2p/12D3KooWAmBp54mUEYtvsk2kxNEsDbAvdUMcaghxKXgUQxmPEQ66", "/dns/kbr13.rotko.net/tcp/35553/wss/p2p/12D3KooWAmBp54mUEYtvsk2kxNEsDbAvdUMcaghxKXgUQxmPEQ66", "/dns/bridge-hub-kusama.bootnodes.polkadotters.com/tcp/30520/p2p/12D3KooWH3pucezRRS5esoYyzZsUkKWcPSByQxEvmM819QL1HPLV", - "/dns/bridge-hub-kusama.bootnodes.polkadotters.com/tcp/30522/wss/p2p/12D3KooWH3pucezRRS5esoYyzZsUkKWcPSByQxEvmM819QL1HPLV" + "/dns/bridge-hub-kusama.bootnodes.polkadotters.com/tcp/30522/wss/p2p/12D3KooWH3pucezRRS5esoYyzZsUkKWcPSByQxEvmM819QL1HPLV", + "/dns/boot-kusama-bridgehub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWQybw6AFmAvrFfwUQnNxUpS12RovapD6oorh2mAJr4xyd" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-polkadot.json b/cumulus/parachains/chain-specs/bridge-hub-polkadot.json index a9444b89e1e..c51c5eff89b 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-polkadot.json +++ b/cumulus/parachains/chain-specs/bridge-hub-polkadot.json @@ -23,7 +23,8 @@ "/dns/pbr13.rotko.net/tcp/34543/ws/p2p/12D3KooWMxZY7tDc2Rh454VaJJ7RexKAXVS6xSBEvTnXSGCnuGDw", "/dns/pbr13.rotko.net/tcp/35543/wss/p2p/12D3KooWMxZY7tDc2Rh454VaJJ7RexKAXVS6xSBEvTnXSGCnuGDw", "/dns/bridge-hub-polkadot.bootnodes.polkadotters.com/tcp/30517/p2p/12D3KooWLUNE3LHPDa1WrrZaYT7ArK66CLM1bPv7kKz74UcLnQRB", - "/dns/bridge-hub-polkadot.bootnodes.polkadotters.com/tcp/30519/wss/p2p/12D3KooWLUNE3LHPDa1WrrZaYT7ArK66CLM1bPv7kKz74UcLnQRB" + "/dns/bridge-hub-polkadot.bootnodes.polkadotters.com/tcp/30519/wss/p2p/12D3KooWLUNE3LHPDa1WrrZaYT7ArK66CLM1bPv7kKz74UcLnQRB", + "/dns/boot-polkadot-bridgehub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWKf3mBXHjLbwtPqv1BdbQuwbFNcQQYxASS7iQ25264AXH" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/collectives-polkadot.json b/cumulus/parachains/chain-specs/collectives-polkadot.json index 259669cf37a..ce80e21ae62 100644 --- a/cumulus/parachains/chain-specs/collectives-polkadot.json +++ b/cumulus/parachains/chain-specs/collectives-polkadot.json @@ -27,7 +27,8 @@ "/dns/pch13.rotko.net/tcp/34573/ws/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH", "/dns/pch13.rotko.net/tcp/35573/wss/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH", 
"/dns/collectives-polkadot.bootnodes.polkadotters.com/tcp/30526/p2p/12D3KooWNohUjvJtGKUa8Vhy8C1ZBB5N8JATB6e7rdLVCioeb3ff", - "/dns/collectives-polkadot.bootnodes.polkadotters.com/tcp/30528/wss/p2p/12D3KooWNohUjvJtGKUa8Vhy8C1ZBB5N8JATB6e7rdLVCioeb3ff" + "/dns/collectives-polkadot.bootnodes.polkadotters.com/tcp/30528/wss/p2p/12D3KooWNohUjvJtGKUa8Vhy8C1ZBB5N8JATB6e7rdLVCioeb3ff", + "/dns/boot-polkadot-collectives.luckyfriday.io/tcp/443/wss/p2p/12D3KooWCzifnPooTt4kvTnXT7FTKTymVL7xn7DURQLsS2AKpf6w" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/polkadot/node/service/chain-specs/kusama.json b/polkadot/node/service/chain-specs/kusama.json index fd60cb8b6c1..490b39ee696 100644 --- a/polkadot/node/service/chain-specs/kusama.json +++ b/polkadot/node/service/chain-specs/kusama.json @@ -35,6 +35,7 @@ "/dns/ksm14.rotko.net/tcp/35224/wss/p2p/12D3KooWAa5THTw8HPfnhEei23HdL8P9McBXdozG2oTtMMksjZkK", "/dns/ksm14.rotko.net/tcp/33224/p2p/12D3KooWAa5THTw8HPfnhEei23HdL8P9McBXdozG2oTtMMksjZkK", "/dns/ibp-boot-kusama.luckyfriday.io/tcp/30333/p2p/12D3KooW9vu1GWHBuxyhm7rZgD3fhGZpNajPXFexadvhujWMgwfT", + "/dns/boot-kusama.luckyfriday.io/tcp/443/wss/p2p/12D3KooWS1Lu6DmK8YHSvkErpxpcXmk14vG6y4KVEFEkd9g62PP8", "/dns/ibp-boot-kusama.luckyfriday.io/tcp/30334/wss/p2p/12D3KooW9vu1GWHBuxyhm7rZgD3fhGZpNajPXFexadvhujWMgwfT" ], "telemetryEndpoints": [ diff --git a/polkadot/node/service/chain-specs/polkadot.json b/polkadot/node/service/chain-specs/polkadot.json index c235cdd09d8..5f8d88102d7 100644 --- a/polkadot/node/service/chain-specs/polkadot.json +++ b/polkadot/node/service/chain-specs/polkadot.json @@ -36,7 +36,8 @@ "/dns/dot14.rotko.net/tcp/35214/wss/p2p/12D3KooWPyEvPEXghnMC67Gff6PuZiSvfx3fmziKiPZcGStZ5xff", "/dns/dot14.rotko.net/tcp/33214/p2p/12D3KooWPyEvPEXghnMC67Gff6PuZiSvfx3fmziKiPZcGStZ5xff", "/dns/ibp-boot-polkadot.luckyfriday.io/tcp/30333/p2p/12D3KooWEjk6QXrZJ26fLpaajisJGHiz6WiQsR8k7mkM9GmWKnRZ", - "/dns/ibp-boot-polkadot.luckyfriday.io/tcp/30334/wss/p2p/12D3KooWEjk6QXrZJ26fLpaajisJGHiz6WiQsR8k7mkM9GmWKnRZ" + "/dns/ibp-boot-polkadot.luckyfriday.io/tcp/30334/wss/p2p/12D3KooWEjk6QXrZJ26fLpaajisJGHiz6WiQsR8k7mkM9GmWKnRZ", + "/dns/boot-polkadot.luckyfriday.io/tcp/443/wss/p2p/12D3KooWAdyiVAaeGdtBt6vn5zVetwA4z4qfm9Fi2QCSykN1wTBJ" ], "telemetryEndpoints": [ [ -- GitLab From 3410dfb3929462da88be2da813f121d8b1cf46b3 Mon Sep 17 00:00:00 2001 From: Clara van Staden Date: Fri, 22 Mar 2024 11:25:28 +0200 Subject: [PATCH 016/128] Snowbridge Beacon header age check (#3727) ## Bug Explanation Adds a check that prevents finalized headers with a gap larger than the sync committee period being imported, which could cause execution headers in the gap being unprovable. The current version of the Ethereum client checks that there is a header at least every sync committee, but it doesn't check that the headers are within a sync period of each other. For example: Header 100 (sync committee period 1) Header 9000 (sync committee period 2) (8900 blocks apart) These headers are in adjacent sync committees, but more than the sync committee period (8192 blocks) apart. The reason we need a header every 8192 slots at least, is the header is used to prove messages within the last 8192 blocks. If we import header 9000, and we receive a message to be verified at header 200, the `block_roots` field of header 9000 won't contain the header in order to do the ancestry check. ## Environment While running in Rococo, this edge case was discovered after the relayer was offline for a few days. 
## Environment

While running in Rococo, this edge case was discovered after the relayer was offline for a few days. It is unlikely, though not impossible, that it will happen again, so the fix should be backported to polkadot-sdk 1.7.0 (so that [polkadot-fellows/runtimes](https://github.com/polkadot-fellows/runtimes) can be updated with the fix). Our Ethereum client has been operational on Rococo for the past few months, and this has been the only major issue discovered so far.

### Unrelated Change

An unrelated nit: removes a leftover file that should have been deleted when the `parachain` directory was removed.

---------

Co-authored-by: claravanstaden
---
 .../pallets/ethereum-client/src/lib.rs        |  15 +
 .../pallets/ethereum-client/src/tests.rs      |  57 +++-
 .../ethereum-beacon-client/src/mock.rs        | 259 ------------------
 3 files changed, 71 insertions(+), 260 deletions(-)
 delete mode 100644 bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/mock.rs

diff --git a/bridges/snowbridge/pallets/ethereum-client/src/lib.rs b/bridges/snowbridge/pallets/ethereum-client/src/lib.rs
index a54d4a05ac5..fc2ab2fbb58 100644
--- a/bridges/snowbridge/pallets/ethereum-client/src/lib.rs
+++ b/bridges/snowbridge/pallets/ethereum-client/src/lib.rs
@@ -130,6 +130,10 @@ pub mod pallet {
 		InvalidExecutionHeaderProof,
 		InvalidAncestryMerkleProof,
 		InvalidBlockRootsRootMerkleProof,
+		/// The gap between the finalized headers is larger than the sync committee period,
+		/// rendering execution headers unprovable using ancestry proofs (the `block_roots`
+		/// buffer spans the same number of slots as the sync committee period).
+		InvalidFinalizedHeaderGap,
 		HeaderNotFinalized,
 		BlockBodyHashTreeRootFailed,
 		HeaderHashTreeRootFailed,
@@ -398,6 +402,17 @@ pub mod pallet {
 				Error::<T>::IrrelevantUpdate
 			);
 
+			// Verify that the gap between the current finalized header and the newly
+			// imported header is not larger than the sync committee period, otherwise we
+			// cannot do ancestry proofs for execution headers in the gap.
+			ensure!(
+				latest_finalized_state
+					.slot
+					.saturating_add(config::SLOTS_PER_HISTORICAL_ROOT as u64) >=
+					update.finalized_header.slot,
+				Error::<T>::InvalidFinalizedHeaderGap
+			);
+
 			// Verify that the `finality_branch`, if present, confirms `finalized_header` to match
 			// the finalized checkpoint root saved in the state of `attested_header`.
 			let finalized_block_root: H256 = update
diff --git a/bridges/snowbridge/pallets/ethereum-client/src/tests.rs b/bridges/snowbridge/pallets/ethereum-client/src/tests.rs
index 50b6a25c342..4a7b7b45886 100644
--- a/bridges/snowbridge/pallets/ethereum-client/src/tests.rs
+++ b/bridges/snowbridge/pallets/ethereum-client/src/tests.rs
@@ -15,7 +15,7 @@ use crate::mock::{
 
 pub use crate::mock::*;
 
-use crate::config::{EPOCHS_PER_SYNC_COMMITTEE_PERIOD, SLOTS_PER_EPOCH};
+use crate::config::{EPOCHS_PER_SYNC_COMMITTEE_PERIOD, SLOTS_PER_EPOCH, SLOTS_PER_HISTORICAL_ROOT};
 use frame_support::{assert_err, assert_noop, assert_ok};
 use hex_literal::hex;
 use primitives::{
@@ -884,6 +884,61 @@ fn submit_execution_header_not_finalized() {
 	});
 }
 
+/// Check that a gap of more than 8192 slots between finalized headers is not allowed.
+#[test]
+fn submit_finalized_header_update_with_too_large_gap() {
+	let checkpoint = Box::new(load_checkpoint_update_fixture());
+	let update = Box::new(load_sync_committee_update_fixture());
+	let mut next_update = Box::new(load_next_sync_committee_update_fixture());
+
+	// Adds 8193 slots, so that the next update is still in the next sync committee, but the
+	// gap between the finalized headers is more than 8192 slots.
+	let slot_with_large_gap = checkpoint.header.slot + SLOTS_PER_HISTORICAL_ROOT as u64 + 1;
+
+	next_update.finalized_header.slot = slot_with_large_gap;
+	// Adding some slots to the attested header and signature slot since they need to be ahead
+	// of the finalized header.
+	next_update.attested_header.slot = slot_with_large_gap + 33;
+	next_update.signature_slot = slot_with_large_gap + 43;
+
+	new_tester().execute_with(|| {
+		assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint));
+		assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), update.clone()));
+		assert!(<NextSyncCommittee<Test>>::exists());
+		assert_err!(
+			EthereumBeaconClient::submit(RuntimeOrigin::signed(1), next_update.clone()),
+			Error::<Test>::InvalidFinalizedHeaderGap
+		);
+	});
+}
+
+/// Check that a gap of 8192 slots between finalized headers is allowed.
+#[test]
+fn submit_finalized_header_update_with_gap_at_limit() {
+	let checkpoint = Box::new(load_checkpoint_update_fixture());
+	let update = Box::new(load_sync_committee_update_fixture());
+	let mut next_update = Box::new(load_next_sync_committee_update_fixture());
+
+	next_update.finalized_header.slot = checkpoint.header.slot + SLOTS_PER_HISTORICAL_ROOT as u64;
+	// Adding some slots to the attested header and signature slot since they need to be ahead
+	// of the finalized header.
+	next_update.attested_header.slot =
+		checkpoint.header.slot + SLOTS_PER_HISTORICAL_ROOT as u64 + 33;
+	next_update.signature_slot = checkpoint.header.slot + SLOTS_PER_HISTORICAL_ROOT as u64 + 43;
+
+	new_tester().execute_with(|| {
+		assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint));
+		assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), update.clone()));
+		assert!(<NextSyncCommittee<Test>>::exists());
+		assert_err!(
+			EthereumBeaconClient::submit(RuntimeOrigin::signed(1), next_update.clone()),
+			// The test should pass the InvalidFinalizedHeaderGap check, and will fail at the
+			// next check, the merkle proof, because we changed the next_update slots.
+			Error::<Test>::InvalidHeaderMerkleProof
+		);
+	});
+}
+
 /* IMPLS */
 
 #[test]
diff --git a/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/mock.rs b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/mock.rs
deleted file mode 100644
index d2cca373e92..00000000000
--- a/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/mock.rs
+++ /dev/null
@@ -1,259 +0,0 @@
-// SPDX-License-Identifier: Apache-2.0
-// SPDX-FileCopyrightText: 2023 Snowfork
-use crate as ethereum_beacon_client;
-use frame_support::parameter_types;
-use pallet_timestamp;
-use primitives::{Fork, ForkVersions};
-use sp_core::H256;
-use sp_runtime::traits::{BlakeTwo256, IdentityLookup};
-
-#[cfg(not(feature = "beacon-spec-mainnet"))]
-pub mod minimal {
-	use super::*;
-
-	use crate::config;
-	use frame_support::derive_impl;
-	use hex_literal::hex;
-	use primitives::CompactExecutionHeader;
-	use snowbridge_core::inbound::{Log, Proof};
-	use sp_runtime::BuildStorage;
-	use std::{fs::File, path::PathBuf};
-
-	type Block = frame_system::mocking::MockBlock<Test>;
-
-	frame_support::construct_runtime!(
-		pub enum Test {
-			System: frame_system::{Pallet, Call, Storage, Event<T>},
-			Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent},
-			EthereumBeaconClient: ethereum_beacon_client::{Pallet, Call, Storage, Event<T>},
-		}
-	);
-
-	parameter_types!
{ - pub const BlockHashCount: u64 = 250; - pub const SS58Prefix: u8 = 42; - } - - #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] - impl frame_system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type RuntimeTask = RuntimeTask; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; - type PalletInfo = PalletInfo; - type SS58Prefix = SS58Prefix; - type Nonce = u64; - type Block = Block; - } - - impl pallet_timestamp::Config for Test { - type Moment = u64; - type OnTimestampSet = (); - type MinimumPeriod = (); - type WeightInfo = (); - } - - parameter_types! { - pub const ExecutionHeadersPruneThreshold: u32 = 10; - pub const ChainForkVersions: ForkVersions = ForkVersions{ - genesis: Fork { - version: [0, 0, 0, 1], // 0x00000001 - epoch: 0, - }, - altair: Fork { - version: [1, 0, 0, 1], // 0x01000001 - epoch: 0, - }, - bellatrix: Fork { - version: [2, 0, 0, 1], // 0x02000001 - epoch: 0, - }, - capella: Fork { - version: [3, 0, 0, 1], // 0x03000001 - epoch: 0, - }, - }; - } - - impl ethereum_beacon_client::Config for Test { - type RuntimeEvent = RuntimeEvent; - type ForkVersions = ChainForkVersions; - type MaxExecutionHeadersToKeep = ExecutionHeadersPruneThreshold; - type WeightInfo = (); - } - - // Build genesis storage according to the mock runtime. - pub fn new_tester() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - let mut ext = sp_io::TestExternalities::new(t); - let _ = ext.execute_with(|| Timestamp::set(RuntimeOrigin::signed(1), 30_000)); - ext - } - - fn load_fixture(basename: &str) -> Result - where - T: for<'de> serde::Deserialize<'de>, - { - let filepath: PathBuf = - [env!("CARGO_MANIFEST_DIR"), "tests", "fixtures", basename].iter().collect(); - serde_json::from_reader(File::open(filepath).unwrap()) - } - - pub fn load_execution_header_update_fixture() -> primitives::ExecutionHeaderUpdate { - load_fixture("execution-header-update.minimal.json").unwrap() - } - - pub fn load_checkpoint_update_fixture( - ) -> primitives::CheckpointUpdate<{ config::SYNC_COMMITTEE_SIZE }> { - load_fixture("initial-checkpoint.minimal.json").unwrap() - } - - pub fn load_sync_committee_update_fixture( - ) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> { - load_fixture("sync-committee-update.minimal.json").unwrap() - } - - pub fn load_finalized_header_update_fixture( - ) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> { - load_fixture("finalized-header-update.minimal.json").unwrap() - } - - pub fn load_next_sync_committee_update_fixture( - ) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> { - load_fixture("next-sync-committee-update.minimal.json").unwrap() - } - - pub fn load_next_finalized_header_update_fixture( - ) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> { - load_fixture("next-finalized-header-update.minimal.json").unwrap() - } - - pub fn get_message_verification_payload() -> (Log, Proof) { - ( - Log { - address: hex!("ee9170abfbf9421ad6dd07f6bdec9d89f2b581e0").into(), - topics: vec![ - hex!("1b11dcf133cc240f682dab2d3a8e4cd35c5da8c9cf99adac4336f8512584c5ad").into(), - 
hex!("00000000000000000000000000000000000000000000000000000000000003e8").into(), - hex!("0000000000000000000000000000000000000000000000000000000000000001").into(), - ], - data: hex!("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004b000f000000000000000100d184c103f7acc340847eee82a0b909e3358bc28d440edffa1352b13227e8ee646f3ea37456dec701345772617070656420457468657210574554481235003511000000000000000000000000000000000000000000").into(), - }, - Proof { - block_hash: hex!("05aaa60b0f27cce9e71909508527264b77ee14da7b5bf915fcc4e32715333213").into(), - tx_index: 0, - data: (vec![ - hex!("cf0d1c1ba57d1e0edfb59786c7e30c2b7e12bd54612b00cd21c4eaeecedf44fb").to_vec(), - hex!("d21fc4f68ab05bc4dcb23c67008e92c4d466437cdd6ed7aad0c008944c185510").to_vec(), - hex!("b9890f91ca0d77aa2a4adfaf9b9e40c94cac9e638b6d9797923865872944b646").to_vec(), - ], vec![ - hex!("f90131a0b601337b3aa10a671caa724eba641e759399979856141d3aea6b6b4ac59b889ba00c7d5dd48be9060221a02fb8fa213860b4c50d47046c8fa65ffaba5737d569e0a094601b62a1086cd9c9cb71a7ebff9e718f3217fd6e837efe4246733c0a196f63a06a4b0dd0aefc37b3c77828c8f07d1b7a2455ceb5dbfd3c77d7d6aeeddc2f7e8ca0d6e8e23142cdd8ec219e1f5d8b56aa18e456702b195deeaa210327284d42ade4a08a313d4c87023005d1ab631bbfe3f5de1e405d0e66d0bef3e033f1e5711b5521a0bf09a5d9a48b10ade82b8d6a5362a15921c8b5228a3487479b467db97411d82fa0f95cccae2a7c572ef3c566503e30bac2b2feb2d2f26eebf6d870dcf7f8cf59cea0d21fc4f68ab05bc4dcb23c67008e92c4d466437cdd6ed7aad0c008944c1855108080808080808080").to_vec(), - hex!("f851a0b9890f91ca0d77aa2a4adfaf9b9e40c94cac9e638b6d9797923865872944b646a060a634b9280e3a23fb63375e7bbdd9ab07fd379ab6a67e2312bbc112195fa358808080808080808080808080808080").to_vec(), - hex!("f9030820b9030402f90300018301d6e2b9010000000000000800000000000020040008000000000000000000000000400000008000000000000000000000000000000000000000000000000000000000042010000000001000000000000000000000000000000000040000000000000000000000000000000000000000000000008000000000000000002000000000000000000000000200000000000000200000000000100000000040000001000200008000000000000200000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000f901f5f87a942ffa5ecdbe006d30397c7636d3e015eee251369ff842a0c965575a00553e094ca7c5d14f02e107c258dda06867cbf9e0e69f80e71bbcc1a000000000000000000000000000000000000000000000000000000000000003e8a000000000000000000000000000000000000000000000000000000000000003e8f9011c94ee9170abfbf9421ad6dd07f6bdec9d89f2b581e0f863a01b11dcf133cc240f682dab2d3a8e4cd35c5da8c9cf99adac4336f8512584c5ada000000000000000000000000000000000000000000000000000000000000003e8a00000000000000000000000000000000000000000000000000000000000000001b8a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004b000f000000000000000100d184c103f7acc340847eee82a0b909e3358bc28d440edffa1352b13227e8ee646f3ea37456dec701345772617070656420457468657210574554481235003511000000000000000000000000000000000000000000f858948cf6147918a5cbb672703f879f385036f8793a24e1a01449abf21e49fd025f33495e77f7b1461caefdd3d4bb646424a3f445c4576a5ba0000000000000000000000000440edffa1352b13227e8ee646f3ea37456dec701").to_vec(), - ]), - } - ) - } - - pub fn get_message_verification_header() -> CompactExecutionHeader { - CompactExecutionHeader { - parent_hash: hex!("04a7f6ab8282203562c62f38b0ab41d32aaebe2c7ea687702b463148a6429e04") - .into(), - 
-			block_number: 55,
-			state_root: hex!("894d968712976d613519f973a317cb0781c7b039c89f27ea2b7ca193f7befdb3")
-				.into(),
-			receipts_root: hex!("cf0d1c1ba57d1e0edfb59786c7e30c2b7e12bd54612b00cd21c4eaeecedf44fb")
-				.into(),
-		}
-	}
-}
-
-#[cfg(feature = "beacon-spec-mainnet")]
-pub mod mainnet {
-	use super::*;
-	use frame_support::derive_impl;
-
-	type Block = frame_system::mocking::MockBlock<Test>;
-	use sp_runtime::BuildStorage;
-
-	frame_support::construct_runtime!(
-		pub enum Test {
-			System: frame_system::{Pallet, Call, Storage, Event<T>},
-			Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent},
-			EthereumBeaconClient: ethereum_beacon_client::{Pallet, Call, Storage, Event<T>},
-		}
-	);
-
-	parameter_types! {
-		pub const BlockHashCount: u64 = 250;
-		pub const SS58Prefix: u8 = 42;
-	}
-
-	#[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
-	impl frame_system::Config for Test {
-		type BaseCallFilter = frame_support::traits::Everything;
-		type RuntimeOrigin = RuntimeOrigin;
-		type RuntimeCall = RuntimeCall;
-		type RuntimeTask = RuntimeTask;
-		type Hash = H256;
-		type Hashing = BlakeTwo256;
-		type AccountId = u64;
-		type Lookup = IdentityLookup<Self::AccountId>;
-		type RuntimeEvent = RuntimeEvent;
-		type BlockHashCount = BlockHashCount;
-		type PalletInfo = PalletInfo;
-		type SS58Prefix = SS58Prefix;
-		type Nonce = u64;
-		type Block = Block;
-	}
-
-	impl pallet_timestamp::Config for Test {
-		type Moment = u64;
-		type OnTimestampSet = ();
-		type MinimumPeriod = ();
-		type WeightInfo = ();
-	}
-
-	parameter_types! {
-		pub const ChainForkVersions: ForkVersions = ForkVersions{
-			genesis: Fork {
-				version: [0, 0, 16, 32], // 0x00001020
-				epoch: 0,
-			},
-			altair: Fork {
-				version: [1, 0, 16, 32], // 0x01001020
-				epoch: 36660,
-			},
-			bellatrix: Fork {
-				version: [2, 0, 16, 32], // 0x02001020
-				epoch: 112260,
-			},
-			capella: Fork {
-				version: [3, 0, 16, 32], // 0x03001020
-				epoch: 162304,
-			},
-		};
-		pub const ExecutionHeadersPruneThreshold: u32 = 10;
-	}
-
-	impl ethereum_beacon_client::Config for Test {
-		type RuntimeEvent = RuntimeEvent;
-		type ForkVersions = ChainForkVersions;
-		type MaxExecutionHeadersToKeep = ExecutionHeadersPruneThreshold;
-		type WeightInfo = ();
-	}
-
-	// Build genesis storage according to the mock runtime.
-	pub fn new_tester() -> sp_io::TestExternalities {
-		let t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
-		let mut ext = sp_io::TestExternalities::new(t);
-		let _ = ext.execute_with(|| Timestamp::set(RuntimeOrigin::signed(1), 30_000));
-		ext
-	}
-}
--
GitLab

From 22d5b80d4486f2e9d53bf8ad1eb3efa11a4136af Mon Sep 17 00:00:00 2001
From: Vincent Geddes
Date: Fri, 22 Mar 2024 13:52:51 +0200
Subject: [PATCH 017/128] Add a linear fee multiplier (#127) (#3790)

Bridging fees are calculated using a static ETH/DOT exchange rate that can deviate significantly from the real-world exchange rate. We therefore need to add a safety margin to the fee so that users almost always cover the cost of relaying.

# FAQ

> Why introduce a `multiplier` parameter instead of configuring an exchange rate which already has a safety factor applied?

When converting from ETH to DOT we need to _divide_ the multiplier by the exchange rate, and when converting from DOT to ETH we need to _multiply_ the multiplier by the exchange rate.

> Other input parameters to the fee calculation can also deviate from real-world values. These include substrate weights, gas prices, and so on. Why does the multiplier introduced here not adjust those?

A single scalar multiplier won't be able to accommodate the different volatilities efficiently. For example, gas prices are much more volatile than exchange rates, and substrate weights hardly ever change. So the pricing config relating to weights and gas prices should already have an appropriate safety margin pre-applied.
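To illustrate how the multiplier enters the fee computation, here is a hedged sketch in plain Rust. It is exposition only: `remote_fee_in_dot` is a made-up name, plain integer rationals stand in for the pallet's fixed-point types, and the wei/planck decimal conversion is elided.

```rust
// Illustrative sketch, not the pallet code: the remote portion of the fee is
// converted from ETH to DOT by dividing by the ETH/DOT exchange rate, with
// the safety multiplier applied on top.
fn remote_fee_in_dot(
    remote_fee_eth: u128,        // MaxGasRequired * FeePerGas + Reward, in ETH units
    exchange_rate: (u128, u128), // ETH/DOT rate as (numerator, denominator), e.g. (1, 400)
    multiplier: (u128, u128),    // safety multiplier as a rational, e.g. (4, 3)
) -> u128 {
    // multiplier * (remote_fee / exchange_rate), computed with integer rationals.
    remote_fee_eth * exchange_rate.1 * multiplier.0 / (exchange_rate.0 * multiplier.1)
}

fn main() {
    // With an ETH/DOT rate of 1/400 and a 4/3 multiplier, a remote fee of 3
    // ETH units becomes 3 * 400 * 4 / 3 = 1600 DOT units.
    assert_eq!(remote_fee_in_dot(3, (1, 400), (4, 3)), 1600);
}
```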
A single scalar multiplier won't be able to accommodate the different volatilities efficiently. For example, gas prices are much more volatile than exchange rates, and substrate weights hardly ever change. So the pricing config relating to weights and gas prices should already have some appropriate safety margin pre-applied. # Detailed Changes: * Added `multiplier` field to `PricingParameters` * Outbound-queue fee is multiplied by `multiplier` * This `multiplier` is synced to the Ethereum side * Improved Runtime API for calculating outbound-queue fees. This API makes it much easier to configure parts of the system in preparation for launch. * Improve and clarify code documentation Upstreamed from https://github.com/Snowfork/polkadot-sdk/pull/127 --------- Co-authored-by: Clara van Staden Co-authored-by: Adrian Catangiu --- .../pallets/inbound-queue/src/mock.rs | 3 +- .../outbound-queue/runtime-api/src/lib.rs | 11 +++-- .../pallets/outbound-queue/src/api.rs | 18 +++++--- .../pallets/outbound-queue/src/lib.rs | 32 +++++++++---- .../pallets/outbound-queue/src/mock.rs | 3 +- .../pallets/outbound-queue/src/test.rs | 46 +++++++++++-------- bridges/snowbridge/pallets/system/src/lib.rs | 2 + bridges/snowbridge/pallets/system/src/mock.rs | 3 +- .../primitives/core/src/outbound.rs | 13 ++++-- .../snowbridge/primitives/core/src/pricing.rs | 5 ++ .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 11 +++-- 11 files changed, 97 insertions(+), 50 deletions(-) diff --git a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs index 086b27cb828..39e9532ed32 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs @@ -173,7 +173,8 @@ parameter_types! { pub Parameters: PricingParameters = PricingParameters { exchange_rate: FixedU128::from_rational(1, 400), fee_per_gas: gwei(20), - rewards: Rewards { local: DOT, remote: meth(1) } + rewards: Rewards { local: DOT, remote: meth(1) }, + multiplier: FixedU128::from_rational(1, 1), }; } diff --git a/bridges/snowbridge/pallets/outbound-queue/runtime-api/src/lib.rs b/bridges/snowbridge/pallets/outbound-queue/runtime-api/src/lib.rs index 51f46a7b49c..e6ddaa43935 100644 --- a/bridges/snowbridge/pallets/outbound-queue/runtime-api/src/lib.rs +++ b/bridges/snowbridge/pallets/outbound-queue/runtime-api/src/lib.rs @@ -3,7 +3,10 @@ #![cfg_attr(not(feature = "std"), no_std)] use frame_support::traits::tokens::Balance as BalanceT; -use snowbridge_core::outbound::Message; +use snowbridge_core::{ + outbound::{Command, Fee}, + PricingParameters, +}; use snowbridge_outbound_queue_merkle_tree::MerkleProof; sp_api::decl_runtime_apis! { { /// Generate a merkle proof for a committed message identified by `leaf_index`.
/// The merkle root is stored in the block header as a - /// `\[`sp_runtime::generic::DigestItem::Other`\]` + /// `sp_runtime::generic::DigestItem::Other` fn prove_message(leaf_index: u64) -> Option; - /// Calculate the delivery fee for `message` - fn calculate_fee(message: Message) -> Option; + /// Calculate the delivery fee for `command` + fn calculate_fee(command: Command, parameters: Option>) -> Fee; } } diff --git a/bridges/snowbridge/pallets/outbound-queue/src/api.rs b/bridges/snowbridge/pallets/outbound-queue/src/api.rs index 44d63f1e2d2..b904819b1b1 100644 --- a/bridges/snowbridge/pallets/outbound-queue/src/api.rs +++ b/bridges/snowbridge/pallets/outbound-queue/src/api.rs @@ -4,8 +4,12 @@ use crate::{Config, MessageLeaves}; use frame_support::storage::StorageStreamIter; -use snowbridge_core::outbound::{Message, SendMessage}; +use snowbridge_core::{ + outbound::{Command, Fee, GasMeter}, + PricingParameters, +}; use snowbridge_outbound_queue_merkle_tree::{merkle_proof, MerkleProof}; +use sp_core::Get; pub fn prove_message(leaf_index: u64) -> Option where @@ -19,12 +23,14 @@ where Some(proof) } -pub fn calculate_fee(message: Message) -> Option +pub fn calculate_fee( + command: Command, + parameters: Option>, +) -> Fee where T: Config, { - match crate::Pallet::::validate(&message) { - Ok((_, fees)) => Some(fees.total()), - _ => None, - } + let gas_used_at_most = T::GasMeter::maximum_gas_used_at_most(&command); + let parameters = parameters.unwrap_or(T::PricingParameters::get()); + crate::Pallet::::calculate_fee(gas_used_at_most, parameters) } diff --git a/bridges/snowbridge/pallets/outbound-queue/src/lib.rs b/bridges/snowbridge/pallets/outbound-queue/src/lib.rs index 9e949a4791a..9b9dbe854a5 100644 --- a/bridges/snowbridge/pallets/outbound-queue/src/lib.rs +++ b/bridges/snowbridge/pallets/outbound-queue/src/lib.rs @@ -47,24 +47,37 @@ //! consume on Ethereum. Using this upper bound, a final fee can be calculated. //! //! The fee calculation also requires the following parameters: -//! * ETH/DOT exchange rate -//! * Ether fee per unit of gas +//! * Average ETH/DOT exchange rate over some period +//! * Max fee per unit of gas that bridge is willing to refund relayers for //! //! By design, it is expected that governance should manually update these //! parameters every few weeks using the `set_pricing_parameters` extrinsic in the //! system pallet. //! +//! This is an interim measure. Once ETH/DOT liquidity pools are available in the Polkadot network, +//! we'll use them as a source of pricing info, subject to certain safeguards. +//! //! ## Fee Computation Function //! //! ```text //! LocalFee(Message) = WeightToFee(ProcessMessageWeight(Message)) -//! RemoteFee(Message) = MaxGasRequired(Message) * FeePerGas + Reward -//! Fee(Message) = LocalFee(Message) + (RemoteFee(Message) / Ratio("ETH/DOT")) +//! RemoteFee(Message) = MaxGasRequired(Message) * Params.MaxFeePerGas + Params.Reward +//! RemoteFeeAdjusted(Message) = Params.Multiplier * (RemoteFee(Message) / Params.Ratio("ETH/DOT")) +//! Fee(Message) = LocalFee(Message) + RemoteFeeAdjusted(Message) //! ``` //! -//! By design, the computed fee is always going to conservative, to cover worst-case -//! costs of dispatch on Ethereum. In future iterations of the design, we will optimize -//! this, or provide a mechanism to asynchronously refund a portion of collected fees. +//! By design, the computed fee includes a safety factor (the `Multiplier`) to cover +//! unfavourable fluctuations in the ETH/DOT exchange rate. +//! +//! 
## Fee Settlement +//! +//! On the remote side, in the gateway contract, the relayer accrues +//! +//! ```text +//! Min(GasPrice, Message.MaxFeePerGas) * GasUsed() + Message.Reward +//! ``` +//! Or in plain English, relayers are refunded for gas consumption at a +//! price that is the minimum of the actual gas price and `Message.MaxFeePerGas`. //! //! # Extrinsics //! @@ -106,7 +119,7 @@ pub use snowbridge_outbound_queue_merkle_tree::MerkleProof; use sp_core::{H256, U256}; use sp_runtime::{ traits::{CheckedDiv, Hash}, - DigestItem, + DigestItem, Saturating, }; use sp_std::prelude::*; pub use types::{CommittedMessage, ProcessMessageOriginOf}; @@ -366,8 +379,9 @@ pub mod pallet { // downcast to u128 let fee: u128 = fee.try_into().defensive_unwrap_or(u128::MAX); - // convert to local currency + // multiply by multiplier and convert to local currency let fee = FixedU128::from_inner(fee) + .saturating_mul(params.multiplier) .checked_div(&params.exchange_rate) .expect("exchange rate is not zero; qed") .into_inner(); diff --git a/bridges/snowbridge/pallets/outbound-queue/src/mock.rs b/bridges/snowbridge/pallets/outbound-queue/src/mock.rs index 850b13dcf31..67877a05c79 100644 --- a/bridges/snowbridge/pallets/outbound-queue/src/mock.rs +++ b/bridges/snowbridge/pallets/outbound-queue/src/mock.rs @@ -77,7 +77,8 @@ parameter_types! { pub Parameters: PricingParameters = PricingParameters { exchange_rate: FixedU128::from_rational(1, 400), fee_per_gas: gwei(20), - rewards: Rewards { local: DOT, remote: meth(1) } + rewards: Rewards { local: DOT, remote: meth(1) }, + multiplier: FixedU128::from_rational(4, 3), }; } diff --git a/bridges/snowbridge/pallets/outbound-queue/src/test.rs b/bridges/snowbridge/pallets/outbound-queue/src/test.rs index 8ed4a318d68..4e9ea36e24b 100644 --- a/bridges/snowbridge/pallets/outbound-queue/src/test.rs +++ b/bridges/snowbridge/pallets/outbound-queue/src/test.rs @@ -268,28 +268,34 @@ fn encode_digest_item() { } #[test] -fn validate_messages_with_fees() { +fn test_calculate_fees_with_unit_multiplier() { new_tester().execute_with(|| { - let message = mock_message(1000); - let (_, fee) = OutboundQueue::validate(&message).unwrap(); + let gas_used: u64 = 250000; + let price_params: PricingParameters<::Balance> = PricingParameters { + exchange_rate: FixedU128::from_rational(1, 400), + fee_per_gas: 10000_u32.into(), + rewards: Rewards { local: 1_u32.into(), remote: 1_u32.into() }, + multiplier: FixedU128::from_rational(1, 1), + }; + let fee = OutboundQueue::calculate_fee(gas_used, price_params); assert_eq!(fee.local, 698000000); - assert_eq!(fee.remote, 2680000000000); + assert_eq!(fee.remote, 1000000); }); } #[test] -fn test_calculate_fees() { +fn test_calculate_fees_with_multiplier() { new_tester().execute_with(|| { let gas_used: u64 = 250000; - let illegal_price_params: PricingParameters<::Balance> = - PricingParameters { - exchange_rate: FixedU128::from_rational(1, 400), - fee_per_gas: 10000_u32.into(), - rewards: Rewards { local: 1_u32.into(), remote: 1_u32.into() }, - }; - let fee = OutboundQueue::calculate_fee(gas_used, illegal_price_params); + let price_params: PricingParameters<::Balance> = PricingParameters { + exchange_rate: FixedU128::from_rational(1, 400), + fee_per_gas: 10000_u32.into(), + rewards: Rewards { local: 1_u32.into(), remote: 1_u32.into() }, + multiplier: FixedU128::from_rational(4, 3), + }; + let fee = OutboundQueue::calculate_fee(gas_used, price_params); assert_eq!(fee.local, 698000000); - assert_eq!(fee.remote, 1000000); + assert_eq!(fee.remote, 1333333); });
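        // Note: compared with `test_calculate_fees_with_unit_multiplier` above, raising the
        // multiplier from 1/1 to 4/3 scales the remote fee proportionally:
        // 1_000_000 * 4 / 3 = 1_333_333 (rounded down by the fixed-point division).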
} @@ -297,13 +303,13 @@ fn test_calculate_fees() { fn test_calculate_fees_with_valid_exchange_rate_but_remote_fee_calculated_as_zero() { new_tester().execute_with(|| { let gas_used: u64 = 250000; - let illegal_price_params: PricingParameters<::Balance> = - PricingParameters { - exchange_rate: FixedU128::from_rational(1, 1), - fee_per_gas: 1_u32.into(), - rewards: Rewards { local: 1_u32.into(), remote: 1_u32.into() }, - }; - let fee = OutboundQueue::calculate_fee(gas_used, illegal_price_params.clone()); + let price_params: PricingParameters<::Balance> = PricingParameters { + exchange_rate: FixedU128::from_rational(1, 1), + fee_per_gas: 1_u32.into(), + rewards: Rewards { local: 1_u32.into(), remote: 1_u32.into() }, + multiplier: FixedU128::from_rational(1, 1), + }; + let fee = OutboundQueue::calculate_fee(gas_used, price_params.clone()); assert_eq!(fee.local, 698000000); // Though none zero pricing params the remote fee calculated here is invalid // which should be avoided diff --git a/bridges/snowbridge/pallets/system/src/lib.rs b/bridges/snowbridge/pallets/system/src/lib.rs index 6e5ceb5e9b1..39c73e3630e 100644 --- a/bridges/snowbridge/pallets/system/src/lib.rs +++ b/bridges/snowbridge/pallets/system/src/lib.rs @@ -159,6 +159,7 @@ pub mod pallet { type DefaultPricingParameters: Get>; /// Cost of delivering a message from Ethereum + #[pallet::constant] type InboundDeliveryCost: Get>; type WeightInfo: WeightInfo; @@ -334,6 +335,7 @@ pub mod pallet { let command = Command::SetPricingParameters { exchange_rate: params.exchange_rate.into(), delivery_cost: T::InboundDeliveryCost::get().saturated_into::(), + multiplier: params.multiplier.into(), }; Self::send(PRIMARY_GOVERNANCE_CHANNEL, command, PaysFee::::No)?; diff --git a/bridges/snowbridge/pallets/system/src/mock.rs b/bridges/snowbridge/pallets/system/src/mock.rs index a711eab5d3d..0312456c982 100644 --- a/bridges/snowbridge/pallets/system/src/mock.rs +++ b/bridges/snowbridge/pallets/system/src/mock.rs @@ -193,7 +193,8 @@ parameter_types! 
{ pub Parameters: PricingParameters = PricingParameters { exchange_rate: FixedU128::from_rational(1, 400), fee_per_gas: gwei(20), - rewards: Rewards { local: DOT, remote: meth(1) } + rewards: Rewards { local: DOT, remote: meth(1) }, + multiplier: FixedU128::from_rational(4, 3) }; pub const InboundDeliveryCost: u128 = 1_000_000_000; diff --git a/bridges/snowbridge/primitives/core/src/outbound.rs b/bridges/snowbridge/primitives/core/src/outbound.rs index bce123878d3..0ba0fdb6108 100644 --- a/bridges/snowbridge/primitives/core/src/outbound.rs +++ b/bridges/snowbridge/primitives/core/src/outbound.rs @@ -136,6 +136,8 @@ mod v1 { exchange_rate: UD60x18, // Cost of delivering a message from Ethereum to BridgeHub, in ROC/KSM/DOT delivery_cost: u128, + // Fee multiplier + multiplier: UD60x18, }, } @@ -203,10 +205,11 @@ mod v1 { Token::Uint(U256::from(*transfer_asset_xcm)), Token::Uint(*register_token), ])]), - Command::SetPricingParameters { exchange_rate, delivery_cost } => + Command::SetPricingParameters { exchange_rate, delivery_cost, multiplier } => ethabi::encode(&[Token::Tuple(vec![ Token::Uint(exchange_rate.clone().into_inner()), Token::Uint(U256::from(*delivery_cost)), + Token::Uint(multiplier.clone().into_inner()), ])]), } } @@ -273,7 +276,8 @@ mod v1 { } } -#[cfg_attr(feature = "std", derive(PartialEq, Debug))] +#[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo)] +#[cfg_attr(feature = "std", derive(PartialEq))] /// Fee for delivering message pub struct Fee where @@ -346,12 +350,13 @@ pub trait GasMeter { /// the command within the message const MAXIMUM_BASE_GAS: u64; + /// Total gas consumed at most, including verification & dispatch fn maximum_gas_used_at_most(command: &Command) -> u64 { Self::MAXIMUM_BASE_GAS + Self::maximum_dispatch_gas_used_at_most(command) } - /// Measures the maximum amount of gas a command payload will require to dispatch, AFTER - /// validation & verification. + /// Measures the maximum amount of gas a command payload will require to *dispatch*, NOT + /// including validation & verification. 
fn maximum_dispatch_gas_used_at_most(command: &Command) -> u64; } diff --git a/bridges/snowbridge/primitives/core/src/pricing.rs b/bridges/snowbridge/primitives/core/src/pricing.rs index 33aeda6d15c..0f392c7ad4b 100644 --- a/bridges/snowbridge/primitives/core/src/pricing.rs +++ b/bridges/snowbridge/primitives/core/src/pricing.rs @@ -13,6 +13,8 @@ pub struct PricingParameters { pub rewards: Rewards, /// Ether (wei) fee per gas unit pub fee_per_gas: U256, + /// Fee multiplier + pub multiplier: FixedU128, } #[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, MaxEncodedLen, TypeInfo)] @@ -43,6 +45,9 @@ where if self.rewards.remote.is_zero() { return Err(InvalidPricingParameters) } + if self.multiplier == FixedU128::zero() { + return Err(InvalidPricingParameters) + } Ok(()) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index bf7483179f2..3980fa0d501 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -38,7 +38,9 @@ pub mod xcm_config; use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; use snowbridge_beacon_primitives::{Fork, ForkVersions}; use snowbridge_core::{ - gwei, meth, outbound::Message, AgentId, AllowSiblingsOnly, PricingParameters, Rewards, + gwei, meth, + outbound::{Command, Fee}, + AgentId, AllowSiblingsOnly, PricingParameters, Rewards, }; use snowbridge_router_primitives::inbound::MessageToXcm; use sp_api::impl_runtime_apis; @@ -503,7 +505,8 @@ parameter_types! { pub Parameters: PricingParameters = PricingParameters { exchange_rate: FixedU128::from_rational(1, 400), fee_per_gas: gwei(20), - rewards: Rewards { local: 1 * UNITS, remote: meth(1) } + rewards: Rewards { local: 1 * UNITS, remote: meth(1) }, + multiplier: FixedU128::from_rational(1, 1), }; } @@ -1022,8 +1025,8 @@ impl_runtime_apis! { snowbridge_pallet_outbound_queue::api::prove_message::(leaf_index) } - fn calculate_fee(message: Message) -> Option { - snowbridge_pallet_outbound_queue::api::calculate_fee::(message) + fn calculate_fee(command: Command, parameters: Option>) -> Fee { + snowbridge_pallet_outbound_queue::api::calculate_fee::(command, parameters) } } -- GitLab From 9d2963c29d9b7ea949851a166e0cb2792fc66fff Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Fri, 22 Mar 2024 14:18:03 +0200 Subject: [PATCH 018/128] Make public addresses go first in authority discovery DHT records (#3757) Make sure public addresses explicitly set by the operator go first in the authority discovery DHT records. Also update `Discovery` behavior to eliminate duplicates in the returned addresses. This PR should improve the situation with https://github.com/paritytech/polkadot-sdk/issues/3519. Obsoletes https://github.com/paritytech/polkadot-sdk/pull/3657.
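The priority-and-deduplication rule described above can be sketched as follows. This is a minimal, standalone illustration, not the actual networking code: the `merge_addresses` helper and the address strings are made up, and plain strings stand in for `Multiaddr` values; only the `LinkedHashSet`/`insert_if_absent` usage mirrors the patch below.

```rust
// Minimal sketch of "operator addresses first, duplicates dropped", using the
// `linked_hash_set` crate (the same one this patch bumps to 0.1.4).
use linked_hash_set::LinkedHashSet;

fn merge_addresses(public: &[&'static str], external: &[&'static str]) -> Vec<&'static str> {
    // Operator-configured public addresses are inserted first, fixing the priority order.
    let mut list: LinkedHashSet<&str> = public.iter().copied().collect();
    // Discovered external addresses are appended only if not already present, so
    // duplicates are eliminated while the insertion order is preserved.
    for addr in external {
        list.insert_if_absent(addr);
    }
    list.into_iter().collect()
}

fn main() {
    let merged = merge_addresses(
        &["/dns/validator.example/tcp/30333"],
        &["/ip4/10.0.0.1/tcp/30333", "/dns/validator.example/tcp/30333"],
    );
    // The operator-set address stays first and its duplicate is dropped.
    assert_eq!(merged, ["/dns/validator.example/tcp/30333", "/ip4/10.0.0.1/tcp/30333"]);
    println!("{merged:?}");
}
```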
--- Cargo.lock | 1 + .../relay-chain-minimal-node/src/lib.rs | 2 + polkadot/node/service/src/lib.rs | 2 + substrate/bin/node/cli/src/service.rs | 2 + .../client/authority-discovery/Cargo.toml | 1 + .../client/authority-discovery/src/lib.rs | 5 + .../client/authority-discovery/src/worker.rs | 92 ++++++++++++++----- substrate/client/network/Cargo.toml | 2 +- substrate/client/network/src/discovery.rs | 19 +++- 9 files changed, 99 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bdbf6ddac26..074b657e767 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -15555,6 +15555,7 @@ dependencies = [ "futures-timer", "ip_network", "libp2p", + "linked_hash_set", "log", "multihash 0.18.1", "multihash-codetable", diff --git a/cumulus/client/relay-chain-minimal-node/src/lib.rs b/cumulus/client/relay-chain-minimal-node/src/lib.rs index 4bccca59fe3..6aea043713d 100644 --- a/cumulus/client/relay-chain-minimal-node/src/lib.rs +++ b/cumulus/client/relay-chain-minimal-node/src/lib.rs @@ -55,6 +55,7 @@ fn build_authority_discovery_service( prometheus_registry: Option, ) -> AuthorityDiscoveryService { let auth_disc_publish_non_global_ips = config.network.allow_non_globals_in_dht; + let auth_disc_public_addresses = config.network.public_addresses.clone(); let authority_discovery_role = sc_authority_discovery::Role::Discover; let dht_event_stream = network.event_stream("authority-discovery").filter_map(|e| async move { match e { @@ -65,6 +66,7 @@ fn build_authority_discovery_service( let (worker, service) = sc_authority_discovery::new_worker_and_service_with_config( sc_authority_discovery::WorkerConfig { publish_non_global_ips: auth_disc_publish_non_global_ips, + public_addresses: auth_disc_public_addresses, // Require that authority discovery records are signed. strict_record_validation: true, ..Default::default() diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index 83a0afc077e..4f4ede53705 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -807,6 +807,7 @@ pub fn new_full( let shared_voter_state = rpc_setup; let auth_disc_publish_non_global_ips = config.network.allow_non_globals_in_dht; + let auth_disc_public_addresses = config.network.public_addresses.clone(); let mut net_config = sc_network::config::FullNetworkConfiguration::new(&config.network); let genesis_hash = client.block_hash(0).ok().flatten().expect("Genesis block exists; qed"); @@ -1061,6 +1062,7 @@ pub fn new_full( let (worker, service) = sc_authority_discovery::new_worker_and_service_with_config( sc_authority_discovery::WorkerConfig { publish_non_global_ips: auth_disc_publish_non_global_ips, + public_addresses: auth_disc_public_addresses, // Require that authority discovery records are signed. 
strict_record_validation: true, ..Default::default() diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs index 8f2aba6b44c..e4b425e6f96 100644 --- a/substrate/bin/node/cli/src/service.rs +++ b/substrate/bin/node/cli/src/service.rs @@ -422,6 +422,7 @@ pub fn new_full_base( let shared_voter_state = rpc_setup; let auth_disc_publish_non_global_ips = config.network.allow_non_globals_in_dht; + let auth_disc_public_addresses = config.network.public_addresses.clone(); let mut net_config = sc_network::config::FullNetworkConfiguration::new(&config.network); let genesis_hash = client.block_hash(0).ok().flatten().expect("Genesis block exists; qed"); @@ -610,6 +611,7 @@ pub fn new_full_base( sc_authority_discovery::new_worker_and_service_with_config( sc_authority_discovery::WorkerConfig { publish_non_global_ips: auth_disc_publish_non_global_ips, + public_addresses: auth_disc_public_addresses, ..Default::default() }, client.clone(), diff --git a/substrate/client/authority-discovery/Cargo.toml b/substrate/client/authority-discovery/Cargo.toml index cdd4052f0b0..26580064b3c 100644 --- a/substrate/client/authority-discovery/Cargo.toml +++ b/substrate/client/authority-discovery/Cargo.toml @@ -29,6 +29,7 @@ multihash = { version = "0.18.1", default-features = false, features = [ "sha2", "std", ] } +linked_hash_set = "0.1.4" log = { workspace = true, default-features = true } prost = "0.12" rand = "0.8.5" diff --git a/substrate/client/authority-discovery/src/lib.rs b/substrate/client/authority-discovery/src/lib.rs index 6bb12804cad..281188de143 100644 --- a/substrate/client/authority-discovery/src/lib.rs +++ b/substrate/client/authority-discovery/src/lib.rs @@ -80,6 +80,10 @@ pub struct WorkerConfig { /// Defaults to `true` to avoid the surprise factor. pub publish_non_global_ips: bool, + /// Public addresses set by the node operator to always publish first in the authority + /// discovery DHT record. + pub public_addresses: Vec, + /// Reject authority discovery records that are not signed by their network identity (PeerId) /// /// Defaults to `false` to provide compatibility with old versions @@ -104,6 +108,7 @@ impl Default for WorkerConfig { // `authority_discovery_dht_event_received`. max_query_interval: Duration::from_secs(10 * 60), publish_non_global_ips: true, + public_addresses: Vec::new(), strict_record_validation: false, } } diff --git a/substrate/client/authority-discovery/src/worker.rs b/substrate/client/authority-discovery/src/worker.rs index 9bccb96ff37..b77f0241ec2 100644 --- a/substrate/client/authority-discovery/src/worker.rs +++ b/substrate/client/authority-discovery/src/worker.rs @@ -35,6 +35,7 @@ use addr_cache::AddrCache; use codec::{Decode, Encode}; use ip_network::IpNetwork; use libp2p::{core::multiaddr, identity::PublicKey, multihash::Multihash, Multiaddr, PeerId}; +use linked_hash_set::LinkedHashSet; use multihash_codetable::{Code, MultihashDigest}; use log::{debug, error, log_enabled}; @@ -120,14 +121,22 @@ pub struct Worker { /// Interval to be proactive, publishing own addresses. publish_interval: ExpIncInterval, + /// Pro-actively publish our own addresses at this interval, if the keys in the keystore /// have changed. publish_if_changed_interval: ExpIncInterval, + /// List of keys onto which addresses have been published at the latest publication. /// Used to check whether they have changed. latest_published_keys: HashSet, + /// Same value as in the configuration. 
publish_non_global_ips: bool, + + /// Public addresses set by the node operator to always publish first in the authority + /// discovery DHT record. + public_addresses: LinkedHashSet, + /// Same value as in the configuration. strict_record_validation: bool, @@ -136,6 +145,7 @@ pub struct Worker { /// Queue of throttled lookups pending to be passed to the network. pending_lookups: Vec, + /// Set of in-flight lookups. in_flight_lookups: HashMap, @@ -224,6 +234,29 @@ where None => None, }; + let public_addresses = { + let local_peer_id: Multihash = network.local_peer_id().into(); + + config + .public_addresses + .into_iter() + .map(|mut address| { + if let Some(multiaddr::Protocol::P2p(peer_id)) = address.iter().last() { + if peer_id != local_peer_id { + error!( + target: LOG_TARGET, + "Discarding invalid local peer ID in public address {address}.", + ); + } + // Always discard `/p2p/...` protocol for proper address comparison (local + // peer id will be added before publishing). + address.pop(); + } + address + }) + .collect() + }; + Worker { from_service: from_service.fuse(), client, @@ -233,6 +266,7 @@ where publish_if_changed_interval, latest_published_keys: HashSet::new(), publish_non_global_ips: config.publish_non_global_ips, + public_addresses, strict_record_validation: config.strict_record_validation, query_interval, pending_lookups: Vec::new(), @@ -304,32 +338,48 @@ where } fn addresses_to_publish(&self) -> impl Iterator { - let peer_id: Multihash = self.network.local_peer_id().into(); let publish_non_global_ips = self.publish_non_global_ips; - let addresses = self.network.external_addresses().into_iter().filter(move |a| { - if publish_non_global_ips { - return true - } + let addresses = self + .public_addresses + .clone() + .into_iter() + .chain(self.network.external_addresses().into_iter().filter_map(|mut address| { + // Make sure the reported external address does not contain `/p2p/...` protocol. + if let Some(multiaddr::Protocol::P2p(_)) = address.iter().last() { + address.pop(); + } - a.iter().all(|p| match p { - // The `ip_network` library is used because its `is_global()` method is stable, - // while `is_global()` in the standard library currently isn't. - multiaddr::Protocol::Ip4(ip) if !IpNetwork::from(ip).is_global() => false, - multiaddr::Protocol::Ip6(ip) if !IpNetwork::from(ip).is_global() => false, - _ => true, + if self.public_addresses.contains(&address) { + // Already added above. + None + } else { + Some(address) + } + })) + .filter(move |address| { + if publish_non_global_ips { + return true + } + + address.iter().all(|protocol| match protocol { + // The `ip_network` library is used because its `is_global()` method is stable, + // while `is_global()` in the standard library currently isn't. + multiaddr::Protocol::Ip4(ip) if !IpNetwork::from(ip).is_global() => false, + multiaddr::Protocol::Ip6(ip) if !IpNetwork::from(ip).is_global() => false, + _ => true, + }) }) - }); + .collect::>(); - debug!(target: LOG_TARGET, "Authority DHT record peer_id='{:?}' addresses='{:?}'", peer_id, addresses.clone().collect::>()); + let peer_id = self.network.local_peer_id(); + debug!( + target: LOG_TARGET, + "Authority DHT record peer_id='{peer_id}' addresses='{addresses:?}'", + ); - // The address must include the peer id if not already set. - addresses.map(move |a| { - if a.iter().any(|p| matches!(p, multiaddr::Protocol::P2p(_))) { - a - } else { - a.with(multiaddr::Protocol::P2p(peer_id)) - } - }) + // The address must include the peer id. 
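+		// Note: both the operator-provided `public_addresses` and the reported external
+		// addresses had any trailing `/p2p/...` component stripped earlier, so appending
+		// the local peer ID here cannot produce a duplicate component.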
+ let peer_id: Multihash = peer_id.into(); + addresses.into_iter().map(move |a| a.with(multiaddr::Protocol::P2p(peer_id))) } /// Publish own public addresses. diff --git a/substrate/client/network/Cargo.toml b/substrate/client/network/Cargo.toml index cbf74440dc1..c6f17647166 100644 --- a/substrate/client/network/Cargo.toml +++ b/substrate/client/network/Cargo.toml @@ -29,7 +29,7 @@ futures = "0.3.21" futures-timer = "3.0.2" ip_network = "0.4.1" libp2p = { version = "0.51.4", features = ["dns", "identify", "kad", "macros", "mdns", "noise", "ping", "request-response", "tcp", "tokio", "websocket", "yamux"] } -linked_hash_set = "0.1.3" +linked_hash_set = "0.1.4" log = { workspace = true, default-features = true } mockall = "0.11.3" parking_lot = "0.12.1" diff --git a/substrate/client/network/src/discovery.rs b/substrate/client/network/src/discovery.rs index 77c26266aac..4e2121c5540 100644 --- a/substrate/client/network/src/discovery.rs +++ b/substrate/client/network/src/discovery.rs @@ -72,6 +72,7 @@ use libp2p::{ }, PeerId, }; +use linked_hash_set::LinkedHashSet; use log::{debug, info, trace, warn}; use sp_core::hexdisplay::HexDisplay; use std::{ @@ -550,14 +551,20 @@ impl NetworkBehaviour for DiscoveryBehaviour { ) -> Result, ConnectionDenied> { let Some(peer_id) = maybe_peer else { return Ok(Vec::new()) }; - let mut list = self + // Collect addresses into [`LinkedHashSet`] to eliminate duplicate entries preserving the + // order of addresses. Give priority to `permanent_addresses` (used with reserved nodes) and + // `ephemeral_addresses` (used for addresses discovered from other sources, like authority + // discovery DHT records). + let mut list: LinkedHashSet<_> = self .permanent_addresses .iter() .filter_map(|(p, a)| (*p == peer_id).then_some(a.clone())) - .collect::>(); + .collect(); if let Some(ephemeral_addresses) = self.ephemeral_addresses.get(&peer_id) { - list.extend(ephemeral_addresses.clone()); + ephemeral_addresses.iter().for_each(|address| { + list.insert_if_absent(address.clone()); + }); } { @@ -583,12 +590,14 @@ impl NetworkBehaviour for DiscoveryBehaviour { }); } - list.extend(list_to_filter); + list_to_filter.into_iter().for_each(|address| { + list.insert_if_absent(address); + }); } trace!(target: "sub-libp2p", "Addresses of {:?}: {:?}", peer_id, list); - Ok(list) + Ok(list.into_iter().collect()) } fn on_swarm_event(&mut self, event: FromSwarm) { -- GitLab From 2f59e9efa8142d02ee4893a1383debd3b6209019 Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Fri, 22 Mar 2024 19:45:26 +0100 Subject: [PATCH 019/128] XCM remove extra QueryId types from traits (#3763) We do not need to make these traits generic over the QueryId type; we can just use the `QueryId` alias everywhere --- polkadot/xcm/pallet-xcm/src/lib.rs | 11 +++---- polkadot/xcm/xcm-builder/src/controller.rs | 4 +-- polkadot/xcm/xcm-builder/src/pay.rs | 2 +- polkadot/xcm/xcm-builder/src/tests/mock.rs | 11 +++---- .../xcm-executor/src/traits/on_response.rs | 31 ++++++------------- 5 files changed, 22 insertions(+), 37 deletions(-) diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index 1a1f8b402a3..0761b375dfb 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -374,7 +374,7 @@ pub mod pallet { origin: OriginFor, timeout: BlockNumberFor, match_querier: VersionedLocation, - ) -> Result { + ) -> Result { let responder = ::ExecuteXcmOrigin::ensure_origin(origin)?; let query_id = ::new_query( responder, @@ -1478,7 +1478,6 @@ impl sp_std::fmt::Debug for
FeesHandling { } impl QueryHandler for Pallet { - type QueryId = u64; type BlockNumber = BlockNumberFor; type Error = XcmError; type UniversalLocation = T::UniversalLocation; @@ -1488,7 +1487,7 @@ impl QueryHandler for Pallet { responder: impl Into, timeout: BlockNumberFor, match_querier: impl Into, - ) -> Self::QueryId { + ) -> QueryId { Self::do_new_query(responder, None, timeout, match_querier) } @@ -1498,7 +1497,7 @@ impl QueryHandler for Pallet { message: &mut Xcm<()>, responder: impl Into, timeout: Self::BlockNumber, - ) -> Result { + ) -> Result { let responder = responder.into(); let destination = Self::UniversalLocation::get() .invert_target(&responder) @@ -1511,7 +1510,7 @@ impl QueryHandler for Pallet { } /// Removes response when ready and emits [Event::ResponseTaken] event. - fn take_response(query_id: Self::QueryId) -> QueryResponseStatus { + fn take_response(query_id: QueryId) -> QueryResponseStatus { match Queries::::get(query_id) { Some(QueryStatus::Ready { response, at }) => match response.try_into() { Ok(response) => { @@ -1528,7 +1527,7 @@ impl QueryHandler for Pallet { } #[cfg(feature = "runtime-benchmarks")] - fn expect_response(id: Self::QueryId, response: Response) { + fn expect_response(id: QueryId, response: Response) { let response = response.into(); Queries::::insert( id, diff --git a/polkadot/xcm/xcm-builder/src/controller.rs b/polkadot/xcm/xcm-builder/src/controller.rs index ba2b1fb44b8..04b19eaa587 100644 --- a/polkadot/xcm/xcm-builder/src/controller.rs +++ b/polkadot/xcm/xcm-builder/src/controller.rs @@ -132,7 +132,7 @@ pub trait QueryController: QueryHandler { origin: Origin, timeout: Timeout, match_querier: VersionedLocation, - ) -> Result; + ) -> Result; } impl ExecuteController for () { @@ -186,7 +186,7 @@ impl QueryController for () { _origin: Origin, _timeout: Timeout, _match_querier: VersionedLocation, - ) -> Result { + ) -> Result { Ok(Default::default()) } } diff --git a/polkadot/xcm/xcm-builder/src/pay.rs b/polkadot/xcm/xcm-builder/src/pay.rs index 6b466483cfa..35b624b0415 100644 --- a/polkadot/xcm/xcm-builder/src/pay.rs +++ b/polkadot/xcm/xcm-builder/src/pay.rs @@ -88,7 +88,7 @@ impl< type Beneficiary = Beneficiary; type AssetKind = AssetKind; type Balance = u128; - type Id = Querier::QueryId; + type Id = QueryId; type Error = xcm::latest::Error; fn pay( diff --git a/polkadot/xcm/xcm-builder/src/tests/mock.rs b/polkadot/xcm/xcm-builder/src/tests/mock.rs index 4bf347ea771..3d03ab05424 100644 --- a/polkadot/xcm/xcm-builder/src/tests/mock.rs +++ b/polkadot/xcm/xcm-builder/src/tests/mock.rs @@ -38,7 +38,7 @@ pub use sp_std::{ collections::{btree_map::BTreeMap, btree_set::BTreeSet}, fmt::Debug, }; -pub use xcm::latest::{prelude::*, Weight}; +pub use xcm::latest::{prelude::*, QueryId, Weight}; use xcm_executor::traits::{Properties, QueryHandler, QueryResponseStatus}; pub use xcm_executor::{ traits::{ @@ -414,7 +414,6 @@ pub struct TestQueryHandler(core::marker::PhantomData<(T, BlockN impl QueryHandler for TestQueryHandler { - type QueryId = u64; type BlockNumber = BlockNumber; type Error = XcmError; type UniversalLocation = T::UniversalLocation; @@ -423,7 +422,7 @@ impl QueryHandler responder: impl Into, _timeout: Self::BlockNumber, _match_querier: impl Into, - ) -> Self::QueryId { + ) -> QueryId { let query_id = 1; expect_response(query_id, responder.into()); query_id @@ -433,7 +432,7 @@ impl QueryHandler message: &mut Xcm<()>, responder: impl Into, timeout: Self::BlockNumber, - ) -> Result { + ) -> Result { let responder = responder.into(); let 
destination = Self::UniversalLocation::get() .invert_target(&responder) @@ -445,7 +444,7 @@ impl QueryHandler Ok(query_id) } - fn take_response(query_id: Self::QueryId) -> QueryResponseStatus { + fn take_response(query_id: QueryId) -> QueryResponseStatus { QUERIES .with(|q| { q.borrow().get(&query_id).and_then(|v| match v { @@ -460,7 +459,7 @@ impl QueryHandler } #[cfg(feature = "runtime-benchmarks")] - fn expect_response(_id: Self::QueryId, _response: xcm::latest::Response) { + fn expect_response(_id: QueryId, _response: xcm::latest::Response) { // Unnecessary since it's only a test implementation } } diff --git a/polkadot/xcm/xcm-executor/src/traits/on_response.rs b/polkadot/xcm/xcm-executor/src/traits/on_response.rs index 952bd2d0040..1049bacdca5 100644 --- a/polkadot/xcm/xcm-executor/src/traits/on_response.rs +++ b/polkadot/xcm/xcm-executor/src/traits/on_response.rs @@ -16,11 +16,8 @@ use crate::{Junctions::Here, Xcm}; use core::result; -use frame_support::{ - pallet_prelude::{Get, TypeInfo}, - parameter_types, -}; -use parity_scale_codec::{Decode, Encode, FullCodec, MaxEncodedLen}; +use frame_support::{pallet_prelude::Get, parameter_types}; +use parity_scale_codec::{Decode, Encode}; use sp_arithmetic::traits::Zero; use sp_std::fmt::Debug; use xcm::latest::{ @@ -115,15 +112,6 @@ pub enum QueryResponseStatus { /// Provides methods to expect responses from XCMs and query their status. pub trait QueryHandler { - type QueryId: From - + FullCodec - + MaxEncodedLen - + TypeInfo - + Clone - + Eq - + PartialEq - + Debug - + Copy; type BlockNumber: Zero + Encode; type Error; type UniversalLocation: Get; @@ -151,14 +139,14 @@ pub trait QueryHandler { message: &mut Xcm<()>, responder: impl Into, timeout: Self::BlockNumber, - ) -> result::Result; + ) -> result::Result; /// Attempt to remove and return the response of query with ID `query_id`. - fn take_response(id: Self::QueryId) -> QueryResponseStatus; + fn take_response(id: QueryId) -> QueryResponseStatus; /// Makes sure to expect a response with the given id. #[cfg(feature = "runtime-benchmarks")] - fn expect_response(id: Self::QueryId, response: Response); + fn expect_response(id: QueryId, response: Response); } parameter_types! { @@ -168,17 +156,16 @@ parameter_types! 
{ impl QueryHandler for () { type BlockNumber = u64; type Error = (); - type QueryId = u64; type UniversalLocation = UniversalLocation; - fn take_response(_query_id: Self::QueryId) -> QueryResponseStatus { + fn take_response(_query_id: QueryId) -> QueryResponseStatus { QueryResponseStatus::NotFound } fn new_query( _responder: impl Into, _timeout: Self::BlockNumber, _match_querier: impl Into, - ) -> Self::QueryId { + ) -> QueryId { 0u64 } @@ -186,10 +173,10 @@ impl QueryHandler for () { _message: &mut Xcm<()>, _responder: impl Into, _timeout: Self::BlockNumber, - ) -> Result { + ) -> Result { Err(()) } #[cfg(feature = "runtime-benchmarks")] - fn expect_response(_id: Self::QueryId, _response: crate::Response) {} + fn expect_response(_id: QueryId, _response: crate::Response) {} } -- GitLab From 9a04ebbfb0b77f2dcdc46db6a1a2b455881edd03 Mon Sep 17 00:00:00 2001 From: girazoki Date: Fri, 22 Mar 2024 19:48:15 +0100 Subject: [PATCH 020/128] [pallet-xcm] fix transport fees for remote reserve transfers (#3792) Currently `transfer_assets` from pallet-xcm covers 4 different transfer types: - `localReserve` - `DestinationReserve` - `Teleport` - `RemoteReserve` For the first three, the local execution and the remote message sending are separated, and fees are deducted in pallet-xcm itself: https://github.com/paritytech/polkadot-sdk/blob/3410dfb3929462da88be2da813f121d8b1cf46b3/polkadot/xcm/pallet-xcm/src/lib.rs#L1758. For the fourth case, `RemoteReserve`, pallet-xcm still relies on the xcm-executor itself to send the message (through the `initiateReserveWithdraw` instruction). In this case, if delivery fees need to be charged, it is not possible to do so because the `jit_withdraw` mode has not been set. This PR proposes to still use `initiateReserveWithdraw`, but to prepend a `setFeesMode { jit_withdraw: true }` to make sure delivery fees can be paid. A test case is also added to cover the aforementioned scenario. --------- Co-authored-by: Adrian Catangiu --- .../emulated/common/src/macros.rs | 2 +- .../tests/assets/asset-hub-rococo/src/lib.rs | 8 +- .../src/tests/reserve_transfer.rs | 73 ++------ .../asset-hub-rococo/src/tests/teleport.rs | 14 +- .../tests/assets/asset-hub-westend/src/lib.rs | 8 +- .../src/tests/reserve_transfer.rs | 71 ++------ .../asset-hub-westend/src/tests/teleport.rs | 14 +- .../people-rococo/src/tests/teleport.rs | 6 +- .../people-westend/src/tests/teleport.rs | 6 +- .../assets/test-utils/src/test_cases.rs | 4 +- .../assets/test-utils/src/xcm_helpers.rs | 5 +- .../runtimes/testing/penpal/src/lib.rs | 19 +- .../runtimes/testing/penpal/src/xcm_config.rs | 19 +- polkadot/xcm/pallet-xcm/src/lib.rs | 1 + polkadot/xcm/pallet-xcm/src/mock.rs | 15 +- .../pallet-xcm/src/tests/assets_transfer.rs | 171 ++++++++++++++++++ prdoc/pr_3792.prdoc | 19 ++ 17 files changed, 291 insertions(+), 164 deletions(-) create mode 100644 prdoc/pr_3792.prdoc diff --git a/cumulus/parachains/integration-tests/emulated/common/src/macros.rs b/cumulus/parachains/integration-tests/emulated/common/src/macros.rs index 6951de6faa7..6f6bbe41e01 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/macros.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/macros.rs @@ -115,7 +115,7 @@ macro_rules!
test_parachain_is_trusted_teleporter { let para_receiver_balance_after = <$receiver_para as $crate::macros::Chain>::account_data_of(receiver.clone()).free; let delivery_fees = <$sender_para>::execute_with(|| { - $crate::macros::asset_test_utils::xcm_helpers::transfer_assets_delivery_fees::< + $crate::macros::asset_test_utils::xcm_helpers::teleport_assets_delivery_fees::< <$sender_xcm_config as xcm_executor::Config>::XcmSender, >($assets.clone(), fee_asset_item, weight_limit.clone(), beneficiary, para_destination) }); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs index 21d858f1fe5..a5a4914e21d 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs @@ -63,17 +63,13 @@ mod imports { // Runtimes pub use asset_hub_rococo_runtime::xcm_config::{ - TokenLocation as RelayLocation, UniversalLocation as AssetHubRococoUniversalLocation, - XcmConfig as AssetHubRococoXcmConfig, + TokenLocation as RelayLocation, XcmConfig as AssetHubRococoXcmConfig, }; pub use penpal_runtime::xcm_config::{ LocalReservableFromAssetHub as PenpalLocalReservableFromAssetHub, LocalTeleportableToAssetHub as PenpalLocalTeleportableToAssetHub, - UniversalLocation as PenpalUniversalLocation, XcmConfig as PenpalRococoXcmConfig, - }; - pub use rococo_runtime::xcm_config::{ - UniversalLocation as RococoUniversalLocation, XcmConfig as RococoXcmConfig, }; + pub use rococo_runtime::xcm_config::XcmConfig as RococoXcmConfig; pub const ASSET_ID: u32 = 3; pub const ASSET_MIN_BALANCE: u128 = 1000; diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs index 705c9613b64..0a5956dedfd 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs @@ -524,7 +524,6 @@ fn reserve_transfer_native_asset_from_relay_to_para() { let destination = Rococo::child_location_of(PenpalA::para_id()); let sender = RococoSender::get(); let amount_to_send: Balance = ROCOCO_ED * 1000; - let assets: Assets = (Here, amount_to_send).into(); // Init values fot Parachain let relay_native_asset_location = @@ -552,15 +551,6 @@ fn reserve_transfer_native_asset_from_relay_to_para() { test.set_dispatchable::(relay_to_para_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = Rococo::execute_with(|| { - let reanchored_assets = - assets.reanchored(&destination, &RococoUniversalLocation::get()).unwrap(); - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(reanchored_assets, 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_balance_after = test.sender.balance; let receiver_assets_after = PenpalA::execute_with(|| { @@ -568,8 +558,8 @@ fn reserve_transfer_native_asset_from_relay_to_para() { >::balance(relay_native_asset_location.into(), &receiver) }); - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); + // Sender's balance is reduced by amount sent plus delivery fees + 
assert!(sender_balance_after < sender_balance_before - amount_to_send); // Receiver's asset balance is increased assert!(receiver_assets_after > receiver_assets_before); // Receiver's asset balance increased by `amount_to_send - delivery_fees - bought_execution`; @@ -595,7 +585,7 @@ fn reserve_transfer_native_asset_from_para_to_relay() { ::RuntimeOrigin::signed(asset_owner), relay_native_asset_location, sender.clone(), - amount_to_send, + amount_to_send * 2, ); // Init values for Relay @@ -634,15 +624,6 @@ fn reserve_transfer_native_asset_from_para_to_relay() { test.set_dispatchable::(para_to_relay_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = PenpalA::execute_with(|| { - let reanchored_assets = - assets.reanchored(&destination, &PenpalUniversalLocation::get()).unwrap(); - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(reanchored_assets, 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; @@ -650,8 +631,8 @@ fn reserve_transfer_native_asset_from_para_to_relay() { }); let receiver_balance_after = test.receiver.balance; - // Sender's balance is reduced - assert_eq!(sender_assets_before - amount_to_send - delivery_fees, sender_assets_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_assets_after < sender_assets_before - amount_to_send); // Receiver's asset balance is increased assert!(receiver_balance_after > receiver_balance_before); // Receiver's asset balance increased by `amount_to_send - delivery_fees - bought_execution`; @@ -705,16 +686,6 @@ fn reserve_transfer_native_asset_from_system_para_to_para() { test.set_dispatchable::(system_para_to_para_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = AssetHubRococo::execute_with(|| { - let reanchored_assets = assets - .reanchored(&destination, &AssetHubRococoUniversalLocation::get()) - .unwrap(); - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(reanchored_assets, 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_balance_after = test.sender.balance; let receiver_assets_after = PenpalA::execute_with(|| { @@ -722,8 +693,8 @@ fn reserve_transfer_native_asset_from_system_para_to_para() { >::balance(system_para_native_asset_location, &receiver) }); - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_balance_after < sender_balance_before - amount_to_send); // Receiver's assets is increased assert!(receiver_assets_after > receiver_assets_before); // Receiver's assets increased by `amount_to_send - delivery_fees - bought_execution`; @@ -738,7 +709,7 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { // Init values for Parachain let destination = PenpalA::sibling_location_of(AssetHubRococo::para_id()); let sender = PenpalASender::get(); - let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 1000; + let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 10000; let assets: Assets = (Parent, amount_to_send).into(); let system_para_native_asset_location = v3::Location::try_from(RelayLocation::get()).expect("conversion works"); @@ -749,7 +720,7 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { 
::RuntimeOrigin::signed(asset_owner), system_para_native_asset_location, sender.clone(), - amount_to_send, + amount_to_send * 2, ); // Init values for System Parachain @@ -788,15 +759,6 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { test.set_dispatchable::(para_to_system_para_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = PenpalA::execute_with(|| { - let reanchored_assets = - assets.reanchored(&destination, &PenpalUniversalLocation::get()).unwrap(); - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(reanchored_assets, 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; @@ -804,8 +766,8 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { }); let receiver_balance_after = test.receiver.balance; - // Sender's balance is reduced - assert_eq!(sender_assets_before - amount_to_send - delivery_fees, sender_assets_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_assets_after < sender_assets_before - amount_to_send); // Receiver's balance is increased assert!(receiver_balance_after > receiver_balance_before); // Receiver's balance increased by `amount_to_send - delivery_fees - bought_execution`; @@ -1084,7 +1046,7 @@ fn reserve_transfer_native_asset_from_para_to_para_trough_relay() { ::RuntimeOrigin::signed(asset_owner), relay_native_asset_location, sender.clone(), - amount_to_send, + amount_to_send * 2, ); // fund the Parachain Origin's SA on Relay Chain with the native tokens held in reserve @@ -1118,13 +1080,6 @@ fn reserve_transfer_native_asset_from_para_to_para_trough_relay() { test.set_dispatchable::(para_to_para_through_relay_limited_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = PenpalA::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; @@ -1135,8 +1090,8 @@ fn reserve_transfer_native_asset_from_para_to_para_trough_relay() { >::balance(relay_native_asset_location, &receiver) }); - // Sender's balance is reduced - assert_eq!(sender_assets_before - amount_to_send - delivery_fees, sender_assets_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_assets_after < sender_assets_before - amount_to_send); // Receiver's balance is increased assert!(receiver_assets_after > receiver_assets_before); } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs index 0cc5ddb9f64..4432999aa95 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs @@ -322,7 +322,7 @@ fn limited_teleport_native_assets_from_relay_to_system_para_works() { test.assert(); let delivery_fees = Rococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -369,7 
+369,7 @@ fn limited_teleport_native_assets_back_from_system_para_to_relay_works() { let receiver_balance_after = test.receiver.balance; let delivery_fees = AssetHubRococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -410,7 +410,7 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { let receiver_balance_after = test.receiver.balance; let delivery_fees = AssetHubRococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -445,7 +445,7 @@ fn teleport_native_assets_from_relay_to_system_para_works() { test.assert(); let delivery_fees = Rococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -492,7 +492,7 @@ fn teleport_native_assets_back_from_system_para_to_relay_works() { let receiver_balance_after = test.receiver.balance; let delivery_fees = AssetHubRococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -530,7 +530,7 @@ fn teleport_native_assets_from_system_para_to_relay_fails() { test.assert(); let delivery_fees = AssetHubRococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -593,7 +593,7 @@ fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { ::RuntimeOrigin::signed(asset_owner.clone()), system_para_native_asset_location, sender.clone(), - fee_amount_to_send, + fee_amount_to_send * 2, ); // No need to create the asset (only mint) as it exists in genesis. 
PenpalA::mint_asset( diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs index 3f899d1dbdb..c9f5fe0647e 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs @@ -67,17 +67,13 @@ mod imports { // Runtimes pub use asset_hub_westend_runtime::xcm_config::{ - UniversalLocation as AssetHubWestendUniversalLocation, WestendLocation as RelayLocation, - XcmConfig as AssetHubWestendXcmConfig, + WestendLocation as RelayLocation, XcmConfig as AssetHubWestendXcmConfig, }; pub use penpal_runtime::xcm_config::{ LocalReservableFromAssetHub as PenpalLocalReservableFromAssetHub, LocalTeleportableToAssetHub as PenpalLocalTeleportableToAssetHub, - UniversalLocation as PenpalUniversalLocation, XcmConfig as PenpalWestendXcmConfig, - }; - pub use westend_runtime::xcm_config::{ - UniversalLocation as WestendUniversalLocation, XcmConfig as WestendXcmConfig, }; + pub use westend_runtime::xcm_config::XcmConfig as WestendXcmConfig; pub const ASSET_ID: u32 = 3; pub const ASSET_MIN_BALANCE: u128 = 1000; diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs index 8c836132b54..64ad15ca312 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs @@ -524,7 +524,6 @@ fn reserve_transfer_native_asset_from_relay_to_para() { let destination = Westend::child_location_of(PenpalA::para_id()); let sender = WestendSender::get(); let amount_to_send: Balance = WESTEND_ED * 1000; - let assets: Assets = (Here, amount_to_send).into(); // Init values fot Parachain let relay_native_asset_location = @@ -552,15 +551,6 @@ fn reserve_transfer_native_asset_from_relay_to_para() { test.set_dispatchable::(relay_to_para_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = Westend::execute_with(|| { - let reanchored_assets = - assets.reanchored(&destination, &WestendUniversalLocation::get()).unwrap(); - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(reanchored_assets, 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_balance_after = test.sender.balance; let receiver_assets_after = PenpalA::execute_with(|| { @@ -568,8 +558,8 @@ fn reserve_transfer_native_asset_from_relay_to_para() { >::balance(relay_native_asset_location.into(), &receiver) }); - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_balance_after < sender_balance_before - amount_to_send); // Receiver's asset balance is increased assert!(receiver_assets_after > receiver_assets_before); // Receiver's asset balance increased by `amount_to_send - delivery_fees - bought_execution`; @@ -595,7 +585,7 @@ fn reserve_transfer_native_asset_from_para_to_relay() { ::RuntimeOrigin::signed(asset_owner), relay_native_asset_location, sender.clone(), - amount_to_send, + amount_to_send * 2, ); // Init values for Relay @@ 
-634,15 +624,6 @@ fn reserve_transfer_native_asset_from_para_to_relay() { test.set_dispatchable::(para_to_relay_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = PenpalA::execute_with(|| { - let reanchored_assets = - assets.reanchored(&destination, &PenpalUniversalLocation::get()).unwrap(); - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(reanchored_assets, 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; @@ -650,8 +631,8 @@ fn reserve_transfer_native_asset_from_para_to_relay() { }); let receiver_balance_after = test.receiver.balance; - // Sender's balance is reduced - assert_eq!(sender_assets_before - amount_to_send - delivery_fees, sender_assets_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_assets_after < sender_assets_before - amount_to_send); // Receiver's asset balance is increased assert!(receiver_balance_after > receiver_balance_before); // Receiver's asset balance increased by `amount_to_send - delivery_fees - bought_execution`; @@ -705,16 +686,6 @@ fn reserve_transfer_native_asset_from_system_para_to_para() { test.set_dispatchable::(system_para_to_para_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = AssetHubWestend::execute_with(|| { - let reanchored_assets = assets - .reanchored(&destination, &AssetHubWestendUniversalLocation::get()) - .unwrap(); - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(reanchored_assets, 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_balance_after = test.sender.balance; let receiver_assets_after = PenpalA::execute_with(|| { @@ -722,8 +693,8 @@ fn reserve_transfer_native_asset_from_system_para_to_para() { >::balance(system_para_native_asset_location, &receiver) }); - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_balance_after < sender_balance_before - amount_to_send); // Receiver's assets is increased assert!(receiver_assets_after > receiver_assets_before); // Receiver's assets increased by `amount_to_send - delivery_fees - bought_execution`; @@ -749,7 +720,7 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { ::RuntimeOrigin::signed(asset_owner), system_para_native_asset_location, sender.clone(), - amount_to_send, + amount_to_send * 2, ); // Init values for System Parachain @@ -789,15 +760,6 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { test.set_dispatchable::(para_to_system_para_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = PenpalA::execute_with(|| { - let reanchored_assets = - assets.reanchored(&destination, &PenpalUniversalLocation::get()).unwrap(); - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(reanchored_assets, 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; @@ -805,8 +767,8 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { }); let receiver_balance_after = test.receiver.balance; - // Sender's balance is reduced - assert_eq!(sender_assets_before - amount_to_send - delivery_fees, 
sender_assets_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_assets_after < sender_assets_before - amount_to_send); // Receiver's balance is increased assert!(receiver_balance_after > receiver_balance_before); // Receiver's balance increased by `amount_to_send - delivery_fees - bought_execution`; @@ -1086,7 +1048,7 @@ fn reserve_transfer_native_asset_from_para_to_para_trough_relay() { ::RuntimeOrigin::signed(asset_owner), relay_native_asset_location, sender.clone(), - amount_to_send, + amount_to_send * 2, ); // fund the Parachain Origin's SA on Relay Chain with the native tokens held in reserve @@ -1120,13 +1082,6 @@ fn reserve_transfer_native_asset_from_para_to_para_trough_relay() { test.set_dispatchable::(para_to_para_through_relay_limited_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = PenpalA::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; @@ -1137,8 +1092,8 @@ fn reserve_transfer_native_asset_from_para_to_para_trough_relay() { >::balance(relay_native_asset_location, &receiver) }); - // Sender's balance is reduced - assert_eq!(sender_assets_before - amount_to_send - delivery_fees, sender_assets_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_assets_after < sender_assets_before - amount_to_send); // Receiver's balance is increased assert!(receiver_assets_after > receiver_assets_before); } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs index 61f547fe7c5..aba05ea4322 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs @@ -322,7 +322,7 @@ fn limited_teleport_native_assets_from_relay_to_system_para_works() { test.assert(); let delivery_fees = Westend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -369,7 +369,7 @@ fn limited_teleport_native_assets_back_from_system_para_to_relay_works() { let receiver_balance_after = test.receiver.balance; let delivery_fees = AssetHubWestend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -410,7 +410,7 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { let receiver_balance_after = test.receiver.balance; let delivery_fees = AssetHubWestend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -445,7 +445,7 @@ fn teleport_native_assets_from_relay_to_system_para_works() { test.assert(); let delivery_fees = Westend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + 
xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -492,7 +492,7 @@ fn teleport_native_assets_back_from_system_para_to_relay_works() { let receiver_balance_after = test.receiver.balance; let delivery_fees = AssetHubWestend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -530,7 +530,7 @@ fn teleport_native_assets_from_system_para_to_relay_fails() { test.assert(); let delivery_fees = AssetHubWestend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -593,7 +593,7 @@ fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { ::RuntimeOrigin::signed(asset_owner.clone()), system_para_native_asset_location, sender.clone(), - fee_amount_to_send, + fee_amount_to_send * 2, ); // No need to create the asset (only mint) as it exists in genesis. PenpalA::mint_asset( diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs index 3abe5c6cf66..350d87d638a 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs @@ -155,7 +155,7 @@ fn limited_teleport_native_assets_from_relay_to_system_para_works() { test.assert(); let delivery_fees = Rococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -204,7 +204,7 @@ fn limited_teleport_native_assets_back_from_system_para_to_relay_works() { let receiver_balance_after = test.receiver.balance; let delivery_fees = PeopleRococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -248,7 +248,7 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { let receiver_balance_after = test.receiver.balance; let delivery_fees = PeopleRococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs index eef35a99a83..8697477ba76 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs @@ -155,7 +155,7 @@ fn limited_teleport_native_assets_from_relay_to_system_para_works() { test.assert(); let delivery_fees = Westend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< 
::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -204,7 +204,7 @@ fn limited_teleport_native_assets_back_from_system_para_to_relay_works() { let receiver_balance_after = test.receiver.balance; let delivery_fees = PeopleWestend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -248,7 +248,7 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { let receiver_balance_after = test.receiver.balance; let delivery_fees = PeopleWestend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs index 53e10956bd0..2f2624d8e52 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs @@ -186,7 +186,7 @@ pub fn teleports_for_native_asset_works< // Mint funds into account to ensure it has enough balance to pay delivery fees let delivery_fees = - xcm_helpers::transfer_assets_delivery_fees::( + xcm_helpers::teleport_assets_delivery_fees::( (native_asset_id.clone(), native_asset_to_teleport_away.into()).into(), 0, Unlimited, @@ -579,7 +579,7 @@ pub fn teleports_for_foreign_assets_works< // Make sure the target account has enough native asset to pay for delivery fees let delivery_fees = - xcm_helpers::transfer_assets_delivery_fees::( + xcm_helpers::teleport_assets_delivery_fees::( (foreign_asset_id_location_latest.clone(), asset_to_teleport_away).into(), 0, Unlimited, diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/xcm_helpers.rs b/cumulus/parachains/runtimes/assets/test-utils/src/xcm_helpers.rs index f509a3a8aca..ca0e81fae42 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/xcm_helpers.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/xcm_helpers.rs @@ -18,11 +18,10 @@ use xcm::latest::prelude::*; -/// Returns the delivery fees amount for pallet xcm's `teleport_assets` and -/// `reserve_transfer_assets` extrinsics. +/// Returns the delivery fees amount for pallet xcm's `teleport_assets` extrinsics. /// Because it returns only a `u128`, it assumes delivery fees are only paid /// in one asset and that asset is known. 
-pub fn transfer_assets_delivery_fees( +pub fn teleport_assets_delivery_fees( assets: Assets, fee_asset_item: u32, weight_limit: WeightLimit, diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 1e6d485d148..1d404feac3d 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -57,7 +57,6 @@ use parachains_common::{ impls::{AssetsToBlockAuthor, NonZeroIssuance}, message_queue::{NarrowOriginToSibling, ParaIdToSibling}, }; -use polkadot_runtime_common::xcm_sender::NoPriceForMessageDelivery; use smallvec::smallvec; use sp_api::impl_runtime_apis; pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; @@ -85,7 +84,7 @@ use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; // XCM Imports use parachains_common::{AccountId, Signature}; -use xcm::latest::prelude::BodyId; +use xcm::latest::prelude::{AssetId as AssetLocationId, BodyId}; /// Balance of an account. pub type Balance = u128; @@ -545,6 +544,20 @@ impl pallet_message_queue::Config for Runtime { impl cumulus_pallet_aura_ext::Config for Runtime {} +parameter_types! { + /// The asset ID for the asset that we use to pay for message delivery fees. + pub FeeAssetId: AssetLocationId = AssetLocationId(xcm_config::RelayLocation::get()); + /// The base fee for the message delivery fees (3 CENTS). + pub const BaseDeliveryFee: u128 = (1_000_000_000_000u128 / 100).saturating_mul(3); +} + +pub type PriceForSiblingParachainDelivery = polkadot_runtime_common::xcm_sender::ExponentialPrice< + FeeAssetId, + BaseDeliveryFee, + TransactionByteFee, + XcmpQueue, +>; + impl cumulus_pallet_xcmp_queue::Config for Runtime { type RuntimeEvent = RuntimeEvent; type ChannelInfo = ParachainSystem; @@ -555,7 +568,7 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { type ControllerOrigin = EnsureRoot; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type WeightInfo = (); - type PriceForSiblingDelivery = NoPriceForMessageDelivery; + type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; } parameter_types! 
{ diff --git a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs index d83a877c2f8..639bfd95834 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs @@ -28,6 +28,7 @@ use super::{ ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, WeightToFee, XcmpQueue, }; +use crate::{BaseDeliveryFee, FeeAssetId, TransactionByteFee}; use core::marker::PhantomData; use frame_support::{ parameter_types, @@ -36,10 +37,10 @@ use frame_support::{ }; use frame_system::EnsureRoot; use pallet_xcm::XcmPassthrough; -use parachains_common::xcm_config::AssetFeeAsExistentialDepositMultiplier; +use parachains_common::{xcm_config::AssetFeeAsExistentialDepositMultiplier, TREASURY_PALLET_ID}; use polkadot_parachain_primitives::primitives::Sibling; -use polkadot_runtime_common::impls::ToAuthor; -use sp_runtime::traits::ConvertInto; +use polkadot_runtime_common::{impls::ToAuthor, xcm_sender::ExponentialPrice}; +use sp_runtime::traits::{AccountIdConversion, ConvertInto}; use xcm::latest::prelude::*; use xcm_builder::{ AccountId32Aliases, AllowKnownQueryResponses, AllowSubscriptionsFrom, @@ -49,6 +50,7 @@ use xcm_builder::{ SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, StartsWith, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, WithComputedOrigin, WithUniqueTopic, + XcmFeeManagerFromComponents, XcmFeeToAccount, }; use xcm_executor::{traits::JustTry, XcmExecutor}; @@ -59,6 +61,7 @@ parameter_types! { pub const RelayNetwork: Option = None; pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); pub UniversalLocation: InteriorLocation = [Parachain(ParachainInfo::parachain_id().into())].into(); + pub TreasuryAccount: AccountId = TREASURY_PALLET_ID.into_account_truncating(); } /// Type for specifying how a `Location` can be converted into an `AccountId`. This is used @@ -331,7 +334,10 @@ impl xcm_executor::Config for XcmConfig { type MaxAssetsIntoHolding = MaxAssetsIntoHolding; type AssetLocker = (); type AssetExchanger = (); - type FeeManager = (); + type FeeManager = XcmFeeManagerFromComponents< + (), + XcmFeeToAccount, + >; type MessageExporter = (); type UniversalAliases = Nothing; type CallDispatcher = RuntimeCall; @@ -355,11 +361,14 @@ pub type ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger = /// No local origins on this chain are allowed to dispatch XCM sends/executions. pub type LocalOriginToLocation = SignedToAccountId32; +pub type PriceForParentDelivery = + ExponentialPrice; + /// The means for routing XCM messages which are not for local execution into the right message /// queues. pub type XcmRouter = WithUniqueTopic<( // Two routers - use UMP to communicate with the relay chain: - cumulus_primitives_utility::ParentAsUmp, + cumulus_primitives_utility::ParentAsUmp, // ..and XCMP to communicate with the sibling chains. 
XcmpQueue,
)>;
diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs
index 0761b375dfb..8a9e5288f2e 100644
--- a/polkadot/xcm/pallet-xcm/src/lib.rs
+++ b/polkadot/xcm/pallet-xcm/src/lib.rs
@@ -1990,6 +1990,7 @@ impl Pallet {
 		]);
 		Ok(Xcm(vec![
 			WithdrawAsset(assets.into()),
+			SetFeesMode { jit_withdraw: true },
 			InitiateReserveWithdraw {
 				assets: Wild(AllCounted(max_assets)),
 				reserve,
diff --git a/polkadot/xcm/pallet-xcm/src/mock.rs b/polkadot/xcm/pallet-xcm/src/mock.rs
index b29562fc833..2cc228476ba 100644
--- a/polkadot/xcm/pallet-xcm/src/mock.rs
+++ b/polkadot/xcm/pallet-xcm/src/mock.rs
@@ -361,6 +361,10 @@ parameter_types! {
 		0,
 		[Parachain(FOREIGN_ASSET_RESERVE_PARA_ID)]
 	);
+	pub PaidParaForeignReserveLocation: Location = Location::new(
+		0,
+		[Parachain(Para3000::get())]
+	);
 	pub ForeignAsset: Asset = Asset {
 		fun: Fungible(10),
 		id: AssetId(Location::new(
@@ -368,6 +372,13 @@ parameter_types! {
 			[Parachain(FOREIGN_ASSET_RESERVE_PARA_ID), FOREIGN_ASSET_INNER_JUNCTION],
 		)),
 	};
+	pub PaidParaForeignAsset: Asset = Asset {
+		fun: Fungible(10),
+		id: AssetId(Location::new(
+			0,
+			[Parachain(Para3000::get())],
+		)),
+	};
 	pub UsdcReserveLocation: Location = Location::new(
 		0,
 		[Parachain(USDC_RESERVE_PARA_ID)]
@@ -450,6 +461,8 @@ parameter_types! {
 	pub TrustedFilteredTeleport: (AssetFilter, Location) = (FilteredTeleportAsset::get().into(), FilteredTeleportLocation::get());
 	pub TeleportUsdtToForeign: (AssetFilter, Location) = (Usdt::get().into(), ForeignReserveLocation::get());
 	pub TrustedForeign: (AssetFilter, Location) = (ForeignAsset::get().into(), ForeignReserveLocation::get());
+	pub TrustedPaidParaForeign: (AssetFilter, Location) = (PaidParaForeignAsset::get().into(), PaidParaForeignReserveLocation::get());
+	pub TrustedUsdc: (AssetFilter, Location) = (Usdc::get().into(), UsdcReserveLocation::get());
 	pub const MaxInstructions: u32 = 100;
 	pub const MaxAssetsIntoHolding: u32 = 64;
@@ -483,7 +496,7 @@ impl xcm_executor::Config for XcmConfig {
 	type XcmSender = XcmRouter;
 	type AssetTransactor = AssetTransactors;
 	type OriginConverter = LocalOriginConverter;
-	type IsReserve = (Case, Case);
+	type IsReserve = (Case, Case, Case);
 	type IsTeleporter = (
 		Case,
 		Case,
diff --git a/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs
index 27be5cce145..7752d1355cd 100644
--- a/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs
+++ b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs
@@ -2604,3 +2604,174 @@ fn teleport_assets_using_destination_reserve_fee_disallowed() {
 		expected_result,
 	);
 }
+
+/// Test `tested_call` transferring a single asset using a remote reserve.
+///
+/// Transferring Para3000 asset (`Para3000` reserve) to
+/// `OTHER_PARA_ID` (no teleport trust), therefore triggering the remote reserve case.
+/// Using the same asset (Para3000 reserve) for fees.
+///
+/// Asserts that the sender's balance is decreased and the beneficiary's balance
+/// is increased. Verifies that the correct message is sent and the expected event is emitted.
+///
+/// Verifies that XCM router fees (`SendXcm::validate` -> `Assets`) are withdrawn from the correct
+/// user account and deposited to the correct target account (`XcmFeesTargetAccount`).
+/// Verifies `expected_result`.
+fn remote_asset_reserve_and_remote_fee_reserve_paid_call(
+	tested_call: Call,
+	expected_result: DispatchResult,
+) where
+	Call: FnOnce(
+		OriginFor,
+		Box,
+		Box,
+		Box,
+		u32,
+		WeightLimit,
+	) -> DispatchResult,
+{
+	let weight = BaseXcmWeight::get() * 3;
+	let user_account = AccountId::from(XCM_FEES_NOT_WAIVED_USER_ACCOUNT);
+	let xcm_router_fee_amount = Para3000PaymentAmount::get();
+	let paid_para_id = Para3000::get();
+	let balances = vec![
+		(user_account.clone(), INITIAL_BALANCE),
+		(ParaId::from(paid_para_id).into_account_truncating(), INITIAL_BALANCE),
+		(XcmFeesTargetAccount::get(), INITIAL_BALANCE),
+	];
+	let beneficiary: Location = Junction::AccountId32 { network: None, id: ALICE.into() }.into();
+	new_test_ext_with_balances(balances).execute_with(|| {
+		// create a sufficient amount of the foreign asset BLA
+		let foreign_initial_amount = 142;
+		let (reserve_location, _, foreign_asset_id_location) = set_up_foreign_asset(
+			paid_para_id,
+			None,
+			user_account.clone(),
+			foreign_initial_amount,
+			true,
+		);
+
+		// transfer destination is another chain that is not the reserve location;
+		// the goal is to trigger the remote-reserve case
+		let dest = RelayLocation::get().pushed_with_interior(Parachain(OTHER_PARA_ID)).unwrap();
+
+		let transferred_asset: Assets = (foreign_asset_id_location.clone(), SEND_AMOUNT).into();
+
+		// balance checks before the transfer
+		assert_eq!(
+			AssetsPallet::balance(foreign_asset_id_location.clone(), user_account.clone()),
+			foreign_initial_amount
+		);
+		assert_eq!(Balances::free_balance(user_account.clone()), INITIAL_BALANCE);
+
+		// do the transfer
+		let result = tested_call(
+			RuntimeOrigin::signed(user_account.clone()),
+			Box::new(dest.clone().into()),
+			Box::new(beneficiary.clone().into()),
+			Box::new(transferred_asset.into()),
+			0 as u32,
+			Unlimited,
+		);
+		assert_eq!(result, expected_result);
+		if expected_result.is_err() {
+			// short-circuit here for tests where we expect failure
+			return;
+		}
+
+		let mut last_events = last_events(7).into_iter();
+		// asset events
+		// forceCreate
+		last_events.next().unwrap();
+		// mint tokens
+		last_events.next().unwrap();
+		// burn tokens
+		last_events.next().unwrap();
+		// balance events
+		// burn delivery fee
+		last_events.next().unwrap();
+		// mint delivery fee
+		last_events.next().unwrap();
+		assert_eq!(
+			last_events.next().unwrap(),
+			RuntimeEvent::XcmPallet(crate::Event::Attempted {
+				outcome: Outcome::Complete { used: weight }
+			})
+		);
+
+		// user account spent the transferred amount
+		assert_eq!(
+			AssetsPallet::balance(foreign_asset_id_location.clone(), user_account.clone()),
+			foreign_initial_amount - SEND_AMOUNT
+		);
+
+		// user account spent the delivery fees
+		assert_eq!(Balances::free_balance(user_account), INITIAL_BALANCE - xcm_router_fee_amount);
+
+		// XcmFeesTargetAccount is where xcm_router_fee_amount should land
+		assert_eq!(
+			Balances::free_balance(XcmFeesTargetAccount::get()),
+			INITIAL_BALANCE + xcm_router_fee_amount
+		);
+
+		// Verify total and active issuance of foreign BLA have decreased (burned on
+		// reserve-withdraw)
+		let expected_issuance = foreign_initial_amount - SEND_AMOUNT;
+		assert_eq!(
+			AssetsPallet::total_issuance(foreign_asset_id_location.clone()),
+			expected_issuance
+		);
+		assert_eq!(
+			AssetsPallet::active_issuance(foreign_asset_id_location.clone()),
+			expected_issuance
+		);
+
+		let context = UniversalLocation::get();
+		let foreign_id_location_reanchored =
+			foreign_asset_id_location.reanchored(&dest, &context).unwrap();
+		let dest_reanchored = dest.reanchored(&reserve_location,
&context).unwrap(); + + // Verify sent XCM program + assert_eq!( + sent_xcm(), + vec![( + reserve_location, + // `assets` are burned on source and withdrawn from SA in remote reserve chain + Xcm(vec![ + WithdrawAsset((Location::here(), SEND_AMOUNT).into()), + ClearOrigin, + buy_execution((Location::here(), SEND_AMOUNT / 2)), + DepositReserveAsset { + assets: Wild(AllCounted(1)), + // final destination is `dest` as seen by `reserve` + dest: dest_reanchored, + // message sent onward to `dest` + xcm: Xcm(vec![ + buy_execution((foreign_id_location_reanchored, SEND_AMOUNT / 2)), + DepositAsset { assets: AllCounted(1).into(), beneficiary } + ]) + } + ]) + )] + ); + }); +} +/// Test `transfer_assets` with remote asset reserve and remote fee reserve. +#[test] +fn transfer_assets_with_remote_asset_reserve_and_remote_asset_fee_reserve_paid_works() { + let expected_result = Ok(()); + remote_asset_reserve_and_remote_fee_reserve_paid_call( + XcmPallet::transfer_assets, + expected_result, + ); +} +/// Test `limited_reserve_transfer_assets` with remote asset reserve and remote fee reserve. +#[test] +fn limited_reserve_transfer_assets_with_remote_asset_reserve_and_remote_asset_fee_reserve_paid_works( +) { + let expected_result = Ok(()); + remote_asset_reserve_and_remote_fee_reserve_paid_call( + XcmPallet::limited_reserve_transfer_assets, + expected_result, + ); +} diff --git a/prdoc/pr_3792.prdoc b/prdoc/pr_3792.prdoc new file mode 100644 index 00000000000..cbcdc29a9c6 --- /dev/null +++ b/prdoc/pr_3792.prdoc @@ -0,0 +1,19 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "[pallet-xcm] fix transport fees for remote reserve transfers" + +doc: + - audience: Runtime Dev + description: | + This PR fixes `pallet_xcm::transfer_assets` and + `pallet_xcm::limited_reserve_transfer_assets` extrinsics for transfers + that need to go through remote reserves. The fix is adding a + `SetFeesMode { jit_withdraw: true }` instruction before local execution of + `InitiateReserveWithdraw` so that delivery fees are correctly charged by + the xcm-executor. Without this change, a runtime that has implemented + delivery fees would not be able to execute remote reserve transfers using + these extrinsics. + +crates: + - name: pallet-xcm -- GitLab From 19490abae08570ce87c3cae57542e928aa9a149d Mon Sep 17 00:00:00 2001 From: eskimor Date: Sat, 23 Mar 2024 06:46:15 +0100 Subject: [PATCH 021/128] Fix xcm config for coretime. (#3768) Fixes https://github.com/paritytech/polkadot-sdk/issues/3762 . 
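For context on the barrier change in the diff below: `AllowExplicitUnpaidExecutionFrom<T>` admits explicitly-unpaid execution for any origin matched by the `Contains` filter `T`, and `frame_support` implements `Contains` for tuples as a logical OR over the members, which is what lets the `(IsChildSystemParachain, Fellows)` tuple cover both child system parachains and the Fellows plurality. A minimal sketch of that mechanism, assuming xcm v4 types; the `OnlyParent`/`OnlySiblings` filters here are illustrative only and not part of this patch:

use frame_support::traits::Contains;
use xcm::latest::prelude::*;

/// Illustrative filter: matches only the relay chain (one parent, empty interior).
pub struct OnlyParent;
impl Contains<Location> for OnlyParent {
	fn contains(location: &Location) -> bool {
		matches!(location.unpack(), (1, []))
	}
}

/// Illustrative filter: matches any sibling parachain.
pub struct OnlySiblings;
impl Contains<Location> for OnlySiblings {
	fn contains(location: &Location) -> bool {
		matches!(location.unpack(), (1, [Parachain(_)]))
	}
}

/// A tuple of `Contains` implementations matches when any member matches,
/// so this admits either the relay chain or any sibling parachain.
fn is_unpaid_origin(origin: &Location) -> bool {
	<(OnlyParent, OnlySiblings) as Contains<Location>>::contains(origin)
}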
--------- Co-authored-by: eskimor Co-authored-by: Adrian Catangiu --- polkadot/runtime/westend/src/xcm_config.rs | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/polkadot/runtime/westend/src/xcm_config.rs b/polkadot/runtime/westend/src/xcm_config.rs index 400843c5835..96d2a124ff9 100644 --- a/polkadot/runtime/westend/src/xcm_config.rs +++ b/polkadot/runtime/westend/src/xcm_config.rs @@ -41,10 +41,11 @@ use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, ChildParachainAsNative, ChildParachainConvertsVia, DescribeAllTerminal, DescribeFamily, FrameTransactionalProcessor, - FungibleAdapter, HashedDescription, IsConcrete, MintLocation, OriginToPluralityVoice, - SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, - TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, - XcmFeeManagerFromComponents, XcmFeeToAccount, + FungibleAdapter, HashedDescription, IsChildSystemParachain, IsConcrete, MintLocation, + OriginToPluralityVoice, SignedAccountId32AsNative, SignedToAccountId32, + SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, + WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, + XcmFeeToAccount, }; use xcm_executor::XcmExecutor; @@ -142,13 +143,12 @@ impl Contains for OnlyParachains { } } -pub struct CollectivesOrFellows; -impl Contains for CollectivesOrFellows { +pub struct Fellows; +impl Contains for Fellows { fn contains(location: &Location) -> bool { matches!( location.unpack(), - (0, [Parachain(COLLECTIVES_ID)]) | - (0, [Parachain(COLLECTIVES_ID), Plurality { id: BodyId::Technical, .. }]) + (0, [Parachain(COLLECTIVES_ID), Plurality { id: BodyId::Technical, .. }]) ) } } @@ -172,8 +172,8 @@ pub type Barrier = TrailingSetTopicAsId<( AllowTopLevelPaidExecutionFrom, // Subscriptions for version tracking are OK. AllowSubscriptionsFrom, - // Collectives and Fellows plurality get free execution. - AllowExplicitUnpaidExecutionFrom, + // Messages from system parachains or the Fellows plurality need not pay for execution. + AllowExplicitUnpaidExecutionFrom<(IsChildSystemParachain, Fellows)>, ), UniversalLocation, ConstU32<8>, -- GitLab From 463ccb8f9028ebf5f7fc53d7e835c21f43172253 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sun, 24 Mar 2024 15:21:18 +0100 Subject: [PATCH 022/128] pallet-aura: Expose `SlotDuration` as constant (#3732) --- substrate/frame/aura/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/substrate/frame/aura/src/lib.rs b/substrate/frame/aura/src/lib.rs index 997f51ba428..3ca1444aaae 100644 --- a/substrate/frame/aura/src/lib.rs +++ b/substrate/frame/aura/src/lib.rs @@ -114,6 +114,7 @@ pub mod pallet { /// The effective value of this type should not change while the chain is running. /// /// For backwards compatibility either use [`MinimumPeriodTimesTwo`] or a const. 
+	#[pallet::constant]
 	type SlotDuration: Get<::Moment>;
 }
--
GitLab

From e88d1cb79315792a3dbccb6bdef2543093ecaf5b Mon Sep 17 00:00:00 2001
From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com>
Date: Mon, 25 Mar 2024 10:40:22 +0100
Subject: [PATCH 023/128] [docs] Update ci image in container.md (#3799)

cc https://github.com/paritytech/ci_cd/issues/943

---
 docs/contributor/container.md | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/docs/contributor/container.md b/docs/contributor/container.md
index dd44b31bfe9..9c542f411c8 100644
--- a/docs/contributor/container.md
+++ b/docs/contributor/container.md
@@ -16,7 +16,7 @@ Parity builds and publishes a container image that can be found as `docker.io/pa
 ## Parity CI image
 Parity maintains and uses internally a generic "CI" image that can be used as a base to build binaries: [Parity CI
-container image](https://github.com/paritytech/scripts/tree/master/dockerfiles/ci-linux):
+container image](https://github.com/paritytech/scripts/tree/master/dockerfiles/ci-unified):
 The command below allows building a Linux binary without having to even install Rust or any dependency locally:
@@ -24,14 +24,11 @@ The command below allows building a Linux binary without having to even install
 docker run --rm -it \
 -w /polkadot-sdk \
 -v $(pwd):/polkadot-sdk \
- paritytech/ci-linux:production \
+ paritytech/ci-unified:bullseye-1.75.0-2024-01-22-v20240222 \
 cargo build --release --locked -p polkadot-parachain-bin --bin polkadot-parachain
 sudo chown -R $(id -u):$(id -g) target/
 ```
-If you want to reproduce other steps of CI process you can use the following
-[guide](https://github.com/paritytech/scripts#gitlab-ci-for-building-docker-images).
-
 ## Injected image
 Injecting a binary inside a base image is the quickest option to get a working container image. This only works if you
--
GitLab

From 0711729d251efebf3486db602119ecfa67d98366 Mon Sep 17 00:00:00 2001
From: Serban Iorga
Date: Mon, 25 Mar 2024 13:11:30 +0100
Subject: [PATCH 024/128] [Bridges] Move chain definitions to separate folder (#3822)

Related to https://github.com/paritytech/parity-bridges-common/issues/2538

This PR doesn't contain any functional changes. The PR moves specific bridged
chain definitions from `bridges/primitives` to the `bridges/chains` folder in
order to facilitate the migration of the `parity-bridges-common` repo into
`polkadot-sdk`, as discussed in https://hackmd.io/LprWjZ0bQXKpFeveYHIRXw?view

Apart from this, it also includes some cosmetic changes to some `Cargo.toml`
files as a result of running `diener workspacify`.
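The mechanical effect on path dependencies can be seen throughout the hunks below: a chain crate moved from `bridges/primitives/` to `bridges/chains/` now reaches its sibling primitives one level further away, so, for example, `bp-runtime = { path = "../runtime", default-features = false }` becomes `bp-runtime = { path = "../../primitives/runtime", default-features = false }`, while the `src/lib.rs` files are pure renames (100% similarity).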
--- Cargo.toml | 24 +++++++++---------- .../chain-asset-hub-rococo/Cargo.toml | 2 +- .../chain-asset-hub-rococo/src/lib.rs | 0 .../chain-asset-hub-westend/Cargo.toml | 2 +- .../chain-asset-hub-westend/src/lib.rs | 0 .../chain-bridge-hub-cumulus/Cargo.toml | 6 ++--- .../chain-bridge-hub-cumulus/src/lib.rs | 0 .../chain-bridge-hub-kusama/Cargo.toml | 4 ++-- .../chain-bridge-hub-kusama/src/lib.rs | 0 .../chain-bridge-hub-polkadot/Cargo.toml | 4 ++-- .../chain-bridge-hub-polkadot/src/lib.rs | 0 .../chain-bridge-hub-rococo/Cargo.toml | 4 ++-- .../chain-bridge-hub-rococo/src/lib.rs | 0 .../chain-bridge-hub-westend/Cargo.toml | 4 ++-- .../chain-bridge-hub-westend/src/lib.rs | 0 .../chain-kusama/Cargo.toml | 6 ++--- .../chain-kusama/src/lib.rs | 0 .../chain-polkadot-bulletin/Cargo.toml | 8 +++---- .../chain-polkadot-bulletin/src/lib.rs | 0 .../chain-polkadot/Cargo.toml | 6 ++--- .../chain-polkadot/src/lib.rs | 0 .../chain-rococo/Cargo.toml | 6 ++--- .../chain-rococo/src/lib.rs | 0 .../chain-westend/Cargo.toml | 6 ++--- .../chain-westend/src/lib.rs | 0 .../pallets/ethereum-client/Cargo.toml | 4 ++-- .../pallets/inbound-queue/Cargo.toml | 2 +- cumulus/client/consensus/aura/Cargo.toml | 2 +- .../assets/asset-hub-rococo/Cargo.toml | 2 +- .../assets/asset-hub-westend/Cargo.toml | 2 +- .../bridges/bridge-hub-rococo/Cargo.toml | 2 +- .../bridges/bridge-hub-westend/Cargo.toml | 2 +- .../collectives-westend/Cargo.toml | 2 +- .../people/people-rococo/Cargo.toml | 2 +- .../people/people-westend/Cargo.toml | 2 +- .../parachains/testing/penpal/Cargo.toml | 2 +- .../emulated/chains/relays/rococo/Cargo.toml | 2 +- .../emulated/chains/relays/westend/Cargo.toml | 2 +- .../tests/assets/asset-hub-rococo/Cargo.toml | 2 +- .../tests/assets/asset-hub-westend/Cargo.toml | 6 ++--- .../bridges/bridge-hub-rococo/Cargo.toml | 4 ++-- .../bridges/bridge-hub-westend/Cargo.toml | 4 ++-- .../tests/people/people-rococo/Cargo.toml | 2 +- .../tests/people/people-westend/Cargo.toml | 2 +- .../pallets/collective-content/Cargo.toml | 2 +- .../assets/asset-hub-rococo/Cargo.toml | 8 +++---- .../assets/asset-hub-westend/Cargo.toml | 8 +++---- .../bridge-hubs/bridge-hub-rococo/Cargo.toml | 16 ++++++------- .../bridge-hubs/bridge-hub-westend/Cargo.toml | 14 +++++------ .../storage-weight-reclaim/Cargo.toml | 4 ++-- polkadot/Cargo.toml | 2 +- polkadot/node/core/pvf/Cargo.toml | 2 +- templates/solochain/runtime/Cargo.toml | 2 +- 53 files changed, 94 insertions(+), 94 deletions(-) rename bridges/{primitives => chains}/chain-asset-hub-rococo/Cargo.toml (87%) rename bridges/{primitives => chains}/chain-asset-hub-rococo/src/lib.rs (100%) rename bridges/{primitives => chains}/chain-asset-hub-westend/Cargo.toml (87%) rename bridges/{primitives => chains}/chain-asset-hub-westend/src/lib.rs (100%) rename bridges/{primitives => chains}/chain-bridge-hub-cumulus/Cargo.toml (80%) rename bridges/{primitives => chains}/chain-bridge-hub-cumulus/src/lib.rs (100%) rename bridges/{primitives => chains}/chain-bridge-hub-kusama/Cargo.toml (85%) rename bridges/{primitives => chains}/chain-bridge-hub-kusama/src/lib.rs (100%) rename bridges/{primitives => chains}/chain-bridge-hub-polkadot/Cargo.toml (85%) rename bridges/{primitives => chains}/chain-bridge-hub-polkadot/src/lib.rs (100%) rename bridges/{primitives => chains}/chain-bridge-hub-rococo/Cargo.toml (85%) rename bridges/{primitives => chains}/chain-bridge-hub-rococo/src/lib.rs (100%) rename bridges/{primitives => chains}/chain-bridge-hub-westend/Cargo.toml (85%) rename bridges/{primitives => 
chains}/chain-bridge-hub-westend/src/lib.rs (100%) rename bridges/{primitives => chains}/chain-kusama/Cargo.toml (73%) rename bridges/{primitives => chains}/chain-kusama/src/lib.rs (100%) rename bridges/{primitives => chains}/chain-polkadot-bulletin/Cargo.toml (78%) rename bridges/{primitives => chains}/chain-polkadot-bulletin/src/lib.rs (100%) rename bridges/{primitives => chains}/chain-polkadot/Cargo.toml (73%) rename bridges/{primitives => chains}/chain-polkadot/src/lib.rs (100%) rename bridges/{primitives => chains}/chain-rococo/Cargo.toml (73%) rename bridges/{primitives => chains}/chain-rococo/src/lib.rs (100%) rename bridges/{primitives => chains}/chain-westend/Cargo.toml (73%) rename bridges/{primitives => chains}/chain-westend/src/lib.rs (100%) diff --git a/Cargo.toml b/Cargo.toml index 01d6ef8e87b..5eeac597827 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,24 +10,24 @@ resolver = "2" members = [ "bridges/bin/runtime-common", + "bridges/chains/chain-asset-hub-rococo", + "bridges/chains/chain-asset-hub-westend", + "bridges/chains/chain-bridge-hub-cumulus", + "bridges/chains/chain-bridge-hub-kusama", + "bridges/chains/chain-bridge-hub-polkadot", + "bridges/chains/chain-bridge-hub-rococo", + "bridges/chains/chain-bridge-hub-westend", + "bridges/chains/chain-kusama", + "bridges/chains/chain-polkadot", + "bridges/chains/chain-polkadot-bulletin", + "bridges/chains/chain-rococo", + "bridges/chains/chain-westend", "bridges/modules/grandpa", "bridges/modules/messages", "bridges/modules/parachains", "bridges/modules/relayers", "bridges/modules/xcm-bridge-hub", "bridges/modules/xcm-bridge-hub-router", - "bridges/primitives/chain-asset-hub-rococo", - "bridges/primitives/chain-asset-hub-westend", - "bridges/primitives/chain-bridge-hub-cumulus", - "bridges/primitives/chain-bridge-hub-kusama", - "bridges/primitives/chain-bridge-hub-polkadot", - "bridges/primitives/chain-bridge-hub-rococo", - "bridges/primitives/chain-bridge-hub-westend", - "bridges/primitives/chain-kusama", - "bridges/primitives/chain-polkadot", - "bridges/primitives/chain-polkadot-bulletin", - "bridges/primitives/chain-rococo", - "bridges/primitives/chain-westend", "bridges/primitives/header-chain", "bridges/primitives/messages", "bridges/primitives/parachains", diff --git a/bridges/primitives/chain-asset-hub-rococo/Cargo.toml b/bridges/chains/chain-asset-hub-rococo/Cargo.toml similarity index 87% rename from bridges/primitives/chain-asset-hub-rococo/Cargo.toml rename to bridges/chains/chain-asset-hub-rococo/Cargo.toml index 4dfa149e0ea..07c9b3b5289 100644 --- a/bridges/primitives/chain-asset-hub-rococo/Cargo.toml +++ b/bridges/chains/chain-asset-hub-rococo/Cargo.toml @@ -17,7 +17,7 @@ scale-info = { version = "2.10.0", default-features = false, features = ["derive frame-support = { path = "../../../substrate/frame/support", default-features = false } # Bridge Dependencies -bp-xcm-bridge-hub-router = { path = "../xcm-bridge-hub-router", default-features = false } +bp-xcm-bridge-hub-router = { path = "../../primitives/xcm-bridge-hub-router", default-features = false } [features] default = ["std"] diff --git a/bridges/primitives/chain-asset-hub-rococo/src/lib.rs b/bridges/chains/chain-asset-hub-rococo/src/lib.rs similarity index 100% rename from bridges/primitives/chain-asset-hub-rococo/src/lib.rs rename to bridges/chains/chain-asset-hub-rococo/src/lib.rs diff --git a/bridges/primitives/chain-asset-hub-westend/Cargo.toml b/bridges/chains/chain-asset-hub-westend/Cargo.toml similarity index 87% rename from 
bridges/primitives/chain-asset-hub-westend/Cargo.toml rename to bridges/chains/chain-asset-hub-westend/Cargo.toml index c9bd437562b..f75236ee1b3 100644 --- a/bridges/primitives/chain-asset-hub-westend/Cargo.toml +++ b/bridges/chains/chain-asset-hub-westend/Cargo.toml @@ -17,7 +17,7 @@ scale-info = { version = "2.10.0", default-features = false, features = ["derive frame-support = { path = "../../../substrate/frame/support", default-features = false } # Bridge Dependencies -bp-xcm-bridge-hub-router = { path = "../xcm-bridge-hub-router", default-features = false } +bp-xcm-bridge-hub-router = { path = "../../primitives/xcm-bridge-hub-router", default-features = false } [features] default = ["std"] diff --git a/bridges/primitives/chain-asset-hub-westend/src/lib.rs b/bridges/chains/chain-asset-hub-westend/src/lib.rs similarity index 100% rename from bridges/primitives/chain-asset-hub-westend/src/lib.rs rename to bridges/chains/chain-asset-hub-westend/src/lib.rs diff --git a/bridges/primitives/chain-bridge-hub-cumulus/Cargo.toml b/bridges/chains/chain-bridge-hub-cumulus/Cargo.toml similarity index 80% rename from bridges/primitives/chain-bridge-hub-cumulus/Cargo.toml rename to bridges/chains/chain-bridge-hub-cumulus/Cargo.toml index d35eefa1c45..5e14cb052b7 100644 --- a/bridges/primitives/chain-bridge-hub-cumulus/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-cumulus/Cargo.toml @@ -12,9 +12,9 @@ workspace = true [dependencies] # Bridge Dependencies -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-messages = { path = "../messages", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } +bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } +bp-messages = { path = "../../primitives/messages", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } # Substrate Based Dependencies diff --git a/bridges/primitives/chain-bridge-hub-cumulus/src/lib.rs b/bridges/chains/chain-bridge-hub-cumulus/src/lib.rs similarity index 100% rename from bridges/primitives/chain-bridge-hub-cumulus/src/lib.rs rename to bridges/chains/chain-bridge-hub-cumulus/src/lib.rs diff --git a/bridges/primitives/chain-bridge-hub-kusama/Cargo.toml b/bridges/chains/chain-bridge-hub-kusama/Cargo.toml similarity index 85% rename from bridges/primitives/chain-bridge-hub-kusama/Cargo.toml rename to bridges/chains/chain-bridge-hub-kusama/Cargo.toml index 8d71b3f5eb7..77bc8e54a9d 100644 --- a/bridges/primitives/chain-bridge-hub-kusama/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-kusama/Cargo.toml @@ -13,8 +13,8 @@ workspace = true # Bridge Dependencies bp-bridge-hub-cumulus = { path = "../chain-bridge-hub-cumulus", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } -bp-messages = { path = "../messages", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } +bp-messages = { path = "../../primitives/messages", default-features = false } # Substrate Based Dependencies diff --git a/bridges/primitives/chain-bridge-hub-kusama/src/lib.rs b/bridges/chains/chain-bridge-hub-kusama/src/lib.rs similarity index 100% rename from bridges/primitives/chain-bridge-hub-kusama/src/lib.rs rename to bridges/chains/chain-bridge-hub-kusama/src/lib.rs diff --git a/bridges/primitives/chain-bridge-hub-polkadot/Cargo.toml b/bridges/chains/chain-bridge-hub-polkadot/Cargo.toml similarity index 85% rename from 
bridges/primitives/chain-bridge-hub-polkadot/Cargo.toml rename to bridges/chains/chain-bridge-hub-polkadot/Cargo.toml index 4e89e8a5c9a..5d7a3bbcc1d 100644 --- a/bridges/primitives/chain-bridge-hub-polkadot/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-polkadot/Cargo.toml @@ -14,8 +14,8 @@ workspace = true # Bridge Dependencies bp-bridge-hub-cumulus = { path = "../chain-bridge-hub-cumulus", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } -bp-messages = { path = "../messages", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } +bp-messages = { path = "../../primitives/messages", default-features = false } # Substrate Based Dependencies diff --git a/bridges/primitives/chain-bridge-hub-polkadot/src/lib.rs b/bridges/chains/chain-bridge-hub-polkadot/src/lib.rs similarity index 100% rename from bridges/primitives/chain-bridge-hub-polkadot/src/lib.rs rename to bridges/chains/chain-bridge-hub-polkadot/src/lib.rs diff --git a/bridges/primitives/chain-bridge-hub-rococo/Cargo.toml b/bridges/chains/chain-bridge-hub-rococo/Cargo.toml similarity index 85% rename from bridges/primitives/chain-bridge-hub-rococo/Cargo.toml rename to bridges/chains/chain-bridge-hub-rococo/Cargo.toml index 1643d934a98..3966ef72dcb 100644 --- a/bridges/primitives/chain-bridge-hub-rococo/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-rococo/Cargo.toml @@ -13,8 +13,8 @@ workspace = true # Bridge Dependencies bp-bridge-hub-cumulus = { path = "../chain-bridge-hub-cumulus", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } -bp-messages = { path = "../messages", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } +bp-messages = { path = "../../primitives/messages", default-features = false } # Substrate Based Dependencies diff --git a/bridges/primitives/chain-bridge-hub-rococo/src/lib.rs b/bridges/chains/chain-bridge-hub-rococo/src/lib.rs similarity index 100% rename from bridges/primitives/chain-bridge-hub-rococo/src/lib.rs rename to bridges/chains/chain-bridge-hub-rococo/src/lib.rs diff --git a/bridges/primitives/chain-bridge-hub-westend/Cargo.toml b/bridges/chains/chain-bridge-hub-westend/Cargo.toml similarity index 85% rename from bridges/primitives/chain-bridge-hub-westend/Cargo.toml rename to bridges/chains/chain-bridge-hub-westend/Cargo.toml index 32a7850c539..d35eac8b3fe 100644 --- a/bridges/primitives/chain-bridge-hub-westend/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-westend/Cargo.toml @@ -14,8 +14,8 @@ workspace = true # Bridge Dependencies bp-bridge-hub-cumulus = { path = "../chain-bridge-hub-cumulus", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } -bp-messages = { path = "../messages", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } +bp-messages = { path = "../../primitives/messages", default-features = false } # Substrate Based Dependencies diff --git a/bridges/primitives/chain-bridge-hub-westend/src/lib.rs b/bridges/chains/chain-bridge-hub-westend/src/lib.rs similarity index 100% rename from bridges/primitives/chain-bridge-hub-westend/src/lib.rs rename to bridges/chains/chain-bridge-hub-westend/src/lib.rs diff --git a/bridges/primitives/chain-kusama/Cargo.toml b/bridges/chains/chain-kusama/Cargo.toml similarity index 73% rename from bridges/primitives/chain-kusama/Cargo.toml rename to 
bridges/chains/chain-kusama/Cargo.toml index 0660f346023..4ff4cb46976 100644 --- a/bridges/primitives/chain-kusama/Cargo.toml +++ b/bridges/chains/chain-kusama/Cargo.toml @@ -13,9 +13,9 @@ workspace = true # Bridge Dependencies -bp-header-chain = { path = "../header-chain", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } +bp-header-chain = { path = "../../primitives/header-chain", default-features = false } +bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } # Substrate Based Dependencies diff --git a/bridges/primitives/chain-kusama/src/lib.rs b/bridges/chains/chain-kusama/src/lib.rs similarity index 100% rename from bridges/primitives/chain-kusama/src/lib.rs rename to bridges/chains/chain-kusama/src/lib.rs diff --git a/bridges/primitives/chain-polkadot-bulletin/Cargo.toml b/bridges/chains/chain-polkadot-bulletin/Cargo.toml similarity index 78% rename from bridges/primitives/chain-polkadot-bulletin/Cargo.toml rename to bridges/chains/chain-polkadot-bulletin/Cargo.toml index 15c824fcbdb..d10c4043967 100644 --- a/bridges/primitives/chain-polkadot-bulletin/Cargo.toml +++ b/bridges/chains/chain-polkadot-bulletin/Cargo.toml @@ -15,10 +15,10 @@ scale-info = { version = "2.10.0", default-features = false, features = ["derive # Bridge Dependencies -bp-header-chain = { path = "../header-chain", default-features = false } -bp-messages = { path = "../messages", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } +bp-header-chain = { path = "../../primitives/header-chain", default-features = false } +bp-messages = { path = "../../primitives/messages", default-features = false } +bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } # Substrate Based Dependencies diff --git a/bridges/primitives/chain-polkadot-bulletin/src/lib.rs b/bridges/chains/chain-polkadot-bulletin/src/lib.rs similarity index 100% rename from bridges/primitives/chain-polkadot-bulletin/src/lib.rs rename to bridges/chains/chain-polkadot-bulletin/src/lib.rs diff --git a/bridges/primitives/chain-polkadot/Cargo.toml b/bridges/chains/chain-polkadot/Cargo.toml similarity index 73% rename from bridges/primitives/chain-polkadot/Cargo.toml rename to bridges/chains/chain-polkadot/Cargo.toml index 6421b7f4010..0db6791f66e 100644 --- a/bridges/primitives/chain-polkadot/Cargo.toml +++ b/bridges/chains/chain-polkadot/Cargo.toml @@ -13,9 +13,9 @@ workspace = true # Bridge Dependencies -bp-header-chain = { path = "../header-chain", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } +bp-header-chain = { path = "../../primitives/header-chain", default-features = false } +bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } # Substrate Based Dependencies diff --git a/bridges/primitives/chain-polkadot/src/lib.rs b/bridges/chains/chain-polkadot/src/lib.rs similarity index 100% rename from bridges/primitives/chain-polkadot/src/lib.rs rename to 
bridges/chains/chain-polkadot/src/lib.rs diff --git a/bridges/primitives/chain-rococo/Cargo.toml b/bridges/chains/chain-rococo/Cargo.toml similarity index 73% rename from bridges/primitives/chain-rococo/Cargo.toml rename to bridges/chains/chain-rococo/Cargo.toml index de373f0ae64..9c63f960ae4 100644 --- a/bridges/primitives/chain-rococo/Cargo.toml +++ b/bridges/chains/chain-rococo/Cargo.toml @@ -13,9 +13,9 @@ workspace = true # Bridge Dependencies -bp-header-chain = { path = "../header-chain", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } +bp-header-chain = { path = "../../primitives/header-chain", default-features = false } +bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } # Substrate Based Dependencies diff --git a/bridges/primitives/chain-rococo/src/lib.rs b/bridges/chains/chain-rococo/src/lib.rs similarity index 100% rename from bridges/primitives/chain-rococo/src/lib.rs rename to bridges/chains/chain-rococo/src/lib.rs diff --git a/bridges/primitives/chain-westend/Cargo.toml b/bridges/chains/chain-westend/Cargo.toml similarity index 73% rename from bridges/primitives/chain-westend/Cargo.toml rename to bridges/chains/chain-westend/Cargo.toml index e55a8d649a8..f5de9b95c82 100644 --- a/bridges/primitives/chain-westend/Cargo.toml +++ b/bridges/chains/chain-westend/Cargo.toml @@ -13,9 +13,9 @@ workspace = true # Bridge Dependencies -bp-header-chain = { path = "../header-chain", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } +bp-header-chain = { path = "../../primitives/header-chain", default-features = false } +bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } # Substrate Based Dependencies diff --git a/bridges/primitives/chain-westend/src/lib.rs b/bridges/chains/chain-westend/src/lib.rs similarity index 100% rename from bridges/primitives/chain-westend/src/lib.rs rename to bridges/chains/chain-westend/src/lib.rs diff --git a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml index c8999633c97..cadd542432e 100644 --- a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml +++ b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml @@ -36,7 +36,7 @@ sp-io = { path = "../../../../substrate/primitives/io", default-features = false snowbridge-core = { path = "../../primitives/core", default-features = false } snowbridge-ethereum = { path = "../../primitives/ethereum", default-features = false } -snowbridge-pallet-ethereum-client-fixtures = { path = "./fixtures", default-features = false, optional = true } +snowbridge-pallet-ethereum-client-fixtures = { path = "fixtures", default-features = false, optional = true } primitives = { package = "snowbridge-beacon-primitives", path = "../../primitives/beacon", default-features = false } static_assertions = { version = "1.1.0", default-features = false } bp-runtime = { path = "../../../primitives/runtime", default-features = false } @@ -48,7 +48,7 @@ sp-keyring = { path = "../../../../substrate/primitives/keyring" } serde_json = { workspace = true, default-features = true } hex-literal = "0.4.1" pallet-timestamp = { path = 
"../../../../substrate/frame/timestamp" } -snowbridge-pallet-ethereum-client-fixtures = { path = "./fixtures" } +snowbridge-pallet-ethereum-client-fixtures = { path = "fixtures" } sp-io = { path = "../../../../substrate/primitives/io" } serde = { workspace = true, default-features = true } diff --git a/bridges/snowbridge/pallets/inbound-queue/Cargo.toml b/bridges/snowbridge/pallets/inbound-queue/Cargo.toml index b850496cd4e..9fc1f31fbf7 100644 --- a/bridges/snowbridge/pallets/inbound-queue/Cargo.toml +++ b/bridges/snowbridge/pallets/inbound-queue/Cargo.toml @@ -42,7 +42,7 @@ snowbridge-core = { path = "../../primitives/core", default-features = false } snowbridge-ethereum = { path = "../../primitives/ethereum", default-features = false } snowbridge-router-primitives = { path = "../../primitives/router", default-features = false } snowbridge-beacon-primitives = { path = "../../primitives/beacon", default-features = false } -snowbridge-pallet-inbound-queue-fixtures = { path = "./fixtures", default-features = false, optional = true } +snowbridge-pallet-inbound-queue-fixtures = { path = "fixtures", default-features = false, optional = true } [dev-dependencies] frame-benchmarking = { path = "../../../../substrate/frame/benchmarking" } diff --git a/cumulus/client/consensus/aura/Cargo.toml b/cumulus/client/consensus/aura/Cargo.toml index e815e89d8ce..58bb1dd5914 100644 --- a/cumulus/client/consensus/aura/Cargo.toml +++ b/cumulus/client/consensus/aura/Cargo.toml @@ -41,7 +41,7 @@ substrate-prometheus-endpoint = { path = "../../../../substrate/utils/prometheus cumulus-client-consensus-common = { path = "../common" } cumulus-relay-chain-interface = { path = "../../relay-chain-interface" } cumulus-client-consensus-proposer = { path = "../proposer" } -cumulus-client-parachain-inherent = { path = "../../../client/parachain-inherent" } +cumulus-client-parachain-inherent = { path = "../../parachain-inherent" } cumulus-primitives-aura = { path = "../../../primitives/aura" } cumulus-primitives-core = { path = "../../../primitives/core" } cumulus-client-collator = { path = "../../collator" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml index f4f8b3603ba..98762beb0cb 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml @@ -17,7 +17,7 @@ sp-core = { path = "../../../../../../../../substrate/primitives/core", default- frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } # Cumulus -parachains-common = { path = "../../../../../../../parachains/common" } +parachains-common = { path = "../../../../../../common" } cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } emulated-integration-tests-common = { path = "../../../../common", default-features = false } asset-hub-rococo-runtime = { path = "../../../../../../runtimes/assets/asset-hub-rococo" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml index d4764f63bf6..a42a9abf618 100644 --- 
a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml @@ -17,7 +17,7 @@ sp-core = { path = "../../../../../../../../substrate/primitives/core", default- frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } # Cumulus -parachains-common = { path = "../../../../../../../parachains/common" } +parachains-common = { path = "../../../../../../common" } cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } emulated-integration-tests-common = { path = "../../../../common", default-features = false } asset-hub-westend-runtime = { path = "../../../../../../runtimes/assets/asset-hub-westend" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml index 322d8b44e6e..789f10a35f2 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml @@ -17,7 +17,7 @@ sp-core = { path = "../../../../../../../../substrate/primitives/core", default- frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } # Cumulus -parachains-common = { path = "../../../../../../../parachains/common" } +parachains-common = { path = "../../../../../../common" } emulated-integration-tests-common = { path = "../../../../common", default-features = false } bridge-hub-rococo-runtime = { path = "../../../../../../runtimes/bridge-hubs/bridge-hub-rococo" } bridge-hub-common = { path = "../../../../../../runtimes/bridge-hubs/common", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml index ec1386b7f6e..d82971cf55a 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml @@ -17,7 +17,7 @@ sp-core = { path = "../../../../../../../../substrate/primitives/core", default- frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } # Cumulus -parachains-common = { path = "../../../../../../../parachains/common" } +parachains-common = { path = "../../../../../../common" } emulated-integration-tests-common = { path = "../../../../common", default-features = false } bridge-hub-westend-runtime = { path = "../../../../../../runtimes/bridge-hubs/bridge-hub-westend" } bridge-hub-common = { path = "../../../../../../runtimes/bridge-hubs/common", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/Cargo.toml index 03f755b666a..4c2a7d3c274 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/Cargo.toml +++ 
b/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/Cargo.toml @@ -17,7 +17,7 @@ sp-core = { path = "../../../../../../../../substrate/primitives/core", default- frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } # Cumulus -parachains-common = { path = "../../../../../../../parachains/common" } +parachains-common = { path = "../../../../../../common" } cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } emulated-integration-tests-common = { path = "../../../../common", default-features = false } collectives-westend-runtime = { path = "../../../../../../runtimes/collectives/collectives-westend" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/Cargo.toml index 65a358d0ef2..f7fe93d2777 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/Cargo.toml @@ -14,7 +14,7 @@ sp-core = { path = "../../../../../../../../substrate/primitives/core", default- frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } # Cumulus -parachains-common = { path = "../../../../../../../parachains/common" } +parachains-common = { path = "../../../../../../common" } cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } emulated-integration-tests-common = { path = "../../../../common", default-features = false } people-rococo-runtime = { path = "../../../../../../runtimes/people/people-rococo" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/Cargo.toml index 075698848bc..57a767e0c2a 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/Cargo.toml @@ -14,7 +14,7 @@ sp-core = { path = "../../../../../../../../substrate/primitives/core", default- frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } # Cumulus -parachains-common = { path = "../../../../../../../parachains/common" } +parachains-common = { path = "../../../../../../common" } cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } emulated-integration-tests-common = { path = "../../../../common", default-features = false } people-westend-runtime = { path = "../../../../../../runtimes/people/people-westend" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml index f47350b00eb..2ac508273c6 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml @@ -20,7 +20,7 @@ frame-support = { path = "../../../../../../../../substrate/frame/support", defa xcm = { package = "staging-xcm", path = "../../../../../../../../polkadot/xcm", default-features = false } # Cumulus 
-parachains-common = { path = "../../../../../../../parachains/common" } +parachains-common = { path = "../../../../../../common" } cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } emulated-integration-tests-common = { path = "../../../../common", default-features = false } penpal-runtime = { path = "../../../../../../runtimes/testing/penpal" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml index 2d27426cca7..7ac65b0ee1d 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml @@ -25,5 +25,5 @@ rococo-runtime-constants = { path = "../../../../../../../polkadot/runtime/rococ rococo-runtime = { path = "../../../../../../../polkadot/runtime/rococo" } # Cumulus -parachains-common = { path = "../../../../../../parachains/common" } +parachains-common = { path = "../../../../../common" } emulated-integration-tests-common = { path = "../../../common", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml index abc40c20406..20aedb50e6a 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml @@ -27,5 +27,5 @@ westend-runtime-constants = { path = "../../../../../../../polkadot/runtime/west westend-runtime = { path = "../../../../../../../polkadot/runtime/westend" } # Cumulus -parachains-common = { path = "../../../../../../parachains/common" } +parachains-common = { path = "../../../../../common" } emulated-integration-tests-common = { path = "../../../common", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml index 13eb7d8dfc4..9b519da4b1d 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml @@ -30,7 +30,7 @@ rococo-runtime = { path = "../../../../../../../polkadot/runtime/rococo" } # Cumulus asset-test-utils = { path = "../../../../../runtimes/assets/test-utils" } -parachains-common = { path = "../../../../../../parachains/common" } +parachains-common = { path = "../../../../../common" } cumulus-pallet-parachain-system = { path = "../../../../../../pallets/parachain-system", default-features = false } testnet-parachains-constants = { path = "../../../../../runtimes/constants", features = ["rococo"] } asset-hub-rococo-runtime = { path = "../../../../../runtimes/assets/asset-hub-rococo" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml index 8ac8efb5218..3121ed028eb 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml @@ -31,12 +31,12 @@ pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-fe westend-runtime = { path = 
"../../../../../../../polkadot/runtime/westend" } # Cumulus -parachains-common = { path = "../../../../../../parachains/common" } +parachains-common = { path = "../../../../../common" } testnet-parachains-constants = { path = "../../../../../runtimes/constants", features = ["westend"] } penpal-runtime = { path = "../../../../../runtimes/testing/penpal" } asset-hub-westend-runtime = { path = "../../../../../runtimes/assets/asset-hub-westend" } asset-test-utils = { path = "../../../../../runtimes/assets/test-utils" } -cumulus-pallet-xcmp-queue = { default-features = false, path = "../../../../../../pallets/xcmp-queue" } -cumulus-pallet-parachain-system = { default-features = false, path = "../../../../../../pallets/parachain-system" } +cumulus-pallet-xcmp-queue = { path = "../../../../../../pallets/xcmp-queue", default-features = false } +cumulus-pallet-parachain-system = { path = "../../../../../../pallets/parachain-system", default-features = false } emulated-integration-tests-common = { path = "../../../common", default-features = false } westend-system-emulated-network = { path = "../../../networks/westend-system" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml index 89f0d2a9ca6..18c39f895fa 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml @@ -34,10 +34,10 @@ pallet-bridge-messages = { path = "../../../../../../../bridges/modules/messages bp-messages = { path = "../../../../../../../bridges/primitives/messages", default-features = false } # Cumulus -parachains-common = { path = "../../../../../../parachains/common" } +parachains-common = { path = "../../../../../common" } testnet-parachains-constants = { path = "../../../../../runtimes/constants", features = ["rococo"] } cumulus-pallet-xcmp-queue = { path = "../../../../../../pallets/xcmp-queue", default-features = false } -bridge-hub-rococo-runtime = { path = "../../../../../../parachains/runtimes/bridge-hubs/bridge-hub-rococo", default-features = false } +bridge-hub-rococo-runtime = { path = "../../../../../runtimes/bridge-hubs/bridge-hub-rococo", default-features = false } emulated-integration-tests-common = { path = "../../../common", default-features = false } rococo-westend-system-emulated-network = { path = "../../../networks/rococo-westend-system" } rococo-system-emulated-network = { path = "../../../networks/rococo-system" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml index 9d55903c858..9059d841a48 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml @@ -30,8 +30,8 @@ pallet-bridge-messages = { path = "../../../../../../../bridges/modules/messages bp-messages = { path = "../../../../../../../bridges/primitives/messages", default-features = false } # Cumulus -parachains-common = { path = "../../../../../../parachains/common" } +parachains-common = { path = "../../../../../common" } cumulus-pallet-xcmp-queue = { path = "../../../../../../pallets/xcmp-queue", default-features = false } -bridge-hub-westend-runtime = { path = 
"../../../../../../parachains/runtimes/bridge-hubs/bridge-hub-westend", default-features = false } +bridge-hub-westend-runtime = { path = "../../../../../runtimes/bridge-hubs/bridge-hub-westend", default-features = false } emulated-integration-tests-common = { path = "../../../common", default-features = false } rococo-westend-system-emulated-network = { path = "../../../networks/rococo-westend-system" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml index 609376c1fee..1570aa7662f 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml @@ -26,7 +26,7 @@ polkadot-runtime-common = { path = "../../../../../../../polkadot/runtime/common # Cumulus asset-test-utils = { path = "../../../../../runtimes/assets/test-utils" } -parachains-common = { path = "../../../../../../parachains/common" } +parachains-common = { path = "../../../../../common" } people-rococo-runtime = { path = "../../../../../runtimes/people/people-rococo" } emulated-integration-tests-common = { path = "../../../common", default-features = false } rococo-system-emulated-network = { path = "../../../networks/rococo-system" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml index f2f3366798a..bc093dc0de6 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml @@ -26,7 +26,7 @@ polkadot-runtime-common = { path = "../../../../../../../polkadot/runtime/common # Cumulus asset-test-utils = { path = "../../../../../runtimes/assets/test-utils" } -parachains-common = { path = "../../../../../../parachains/common" } +parachains-common = { path = "../../../../../common" } people-westend-runtime = { path = "../../../../../runtimes/people/people-westend" } emulated-integration-tests-common = { path = "../../../common", default-features = false } westend-system-emulated-network = { path = "../../../networks/westend-system" } diff --git a/cumulus/parachains/pallets/collective-content/Cargo.toml b/cumulus/parachains/pallets/collective-content/Cargo.toml index 691be02f5b8..d4290dd2de2 100644 --- a/cumulus/parachains/pallets/collective-content/Cargo.toml +++ b/cumulus/parachains/pallets/collective-content/Cargo.toml @@ -13,7 +13,7 @@ workspace = true codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", optional = true, default-features = false } +frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-support = { path = "../../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../../substrate/frame/system", default-features = false } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml index 95691a045b7..53abb620022 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml +++ 
b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml @@ -86,10 +86,10 @@ assets-common = { path = "../common", default-features = false } # Bridges pallet-xcm-bridge-hub-router = { path = "../../../../../bridges/modules/xcm-bridge-hub-router", default-features = false } -bp-asset-hub-rococo = { path = "../../../../../bridges/primitives/chain-asset-hub-rococo", default-features = false } -bp-asset-hub-westend = { path = "../../../../../bridges/primitives/chain-asset-hub-westend", default-features = false } -bp-bridge-hub-rococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-rococo", default-features = false } -bp-bridge-hub-westend = { path = "../../../../../bridges/primitives/chain-bridge-hub-westend", default-features = false } +bp-asset-hub-rococo = { path = "../../../../../bridges/chains/chain-asset-hub-rococo", default-features = false } +bp-asset-hub-westend = { path = "../../../../../bridges/chains/chain-asset-hub-westend", default-features = false } +bp-bridge-hub-rococo = { path = "../../../../../bridges/chains/chain-bridge-hub-rococo", default-features = false } +bp-bridge-hub-westend = { path = "../../../../../bridges/chains/chain-bridge-hub-westend", default-features = false } snowbridge-router-primitives = { path = "../../../../../bridges/snowbridge/primitives/router", default-features = false } [dev-dependencies] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml index 950c6e62d72..0f8a1182cd7 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml @@ -84,10 +84,10 @@ assets-common = { path = "../common", default-features = false } # Bridges pallet-xcm-bridge-hub-router = { path = "../../../../../bridges/modules/xcm-bridge-hub-router", default-features = false } -bp-asset-hub-rococo = { path = "../../../../../bridges/primitives/chain-asset-hub-rococo", default-features = false } -bp-asset-hub-westend = { path = "../../../../../bridges/primitives/chain-asset-hub-westend", default-features = false } -bp-bridge-hub-rococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-rococo", default-features = false } -bp-bridge-hub-westend = { path = "../../../../../bridges/primitives/chain-bridge-hub-westend", default-features = false } +bp-asset-hub-rococo = { path = "../../../../../bridges/chains/chain-asset-hub-rococo", default-features = false } +bp-asset-hub-westend = { path = "../../../../../bridges/chains/chain-asset-hub-westend", default-features = false } +bp-bridge-hub-rococo = { path = "../../../../../bridges/chains/chain-bridge-hub-rococo", default-features = false } +bp-bridge-hub-westend = { path = "../../../../../bridges/chains/chain-bridge-hub-westend", default-features = false } [dev-dependencies] hex-literal = "0.4.1" diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index 8a6823ea3ee..13b4b624eef 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -85,20 +85,20 @@ parachains-common = { path = "../../../common", default-features = false } testnet-parachains-constants = { path = "../../constants", default-features = false, features = ["rococo"] } # Bridges -bp-asset-hub-rococo = { path = "../../../../../bridges/primitives/chain-asset-hub-rococo", 
default-features = false } -bp-asset-hub-westend = { path = "../../../../../bridges/primitives/chain-asset-hub-westend", default-features = false } -bp-bridge-hub-polkadot = { path = "../../../../../bridges/primitives/chain-bridge-hub-polkadot", default-features = false } -bp-bridge-hub-rococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-rococo", default-features = false } -bp-bridge-hub-westend = { path = "../../../../../bridges/primitives/chain-bridge-hub-westend", default-features = false } +bp-asset-hub-rococo = { path = "../../../../../bridges/chains/chain-asset-hub-rococo", default-features = false } +bp-asset-hub-westend = { path = "../../../../../bridges/chains/chain-asset-hub-westend", default-features = false } +bp-bridge-hub-polkadot = { path = "../../../../../bridges/chains/chain-bridge-hub-polkadot", default-features = false } +bp-bridge-hub-rococo = { path = "../../../../../bridges/chains/chain-bridge-hub-rococo", default-features = false } +bp-bridge-hub-westend = { path = "../../../../../bridges/chains/chain-bridge-hub-westend", default-features = false } bp-header-chain = { path = "../../../../../bridges/primitives/header-chain", default-features = false } bp-messages = { path = "../../../../../bridges/primitives/messages", default-features = false } bp-parachains = { path = "../../../../../bridges/primitives/parachains", default-features = false } -bp-polkadot-bulletin = { path = "../../../../../bridges/primitives/chain-polkadot-bulletin", default-features = false } +bp-polkadot-bulletin = { path = "../../../../../bridges/chains/chain-polkadot-bulletin", default-features = false } bp-polkadot-core = { path = "../../../../../bridges/primitives/polkadot-core", default-features = false } bp-relayers = { path = "../../../../../bridges/primitives/relayers", default-features = false } bp-runtime = { path = "../../../../../bridges/primitives/runtime", default-features = false } -bp-rococo = { path = "../../../../../bridges/primitives/chain-rococo", default-features = false } -bp-westend = { path = "../../../../../bridges/primitives/chain-westend", default-features = false } +bp-rococo = { path = "../../../../../bridges/chains/chain-rococo", default-features = false } +bp-westend = { path = "../../../../../bridges/chains/chain-westend", default-features = false } pallet-bridge-grandpa = { path = "../../../../../bridges/modules/grandpa", default-features = false } pallet-bridge-messages = { path = "../../../../../bridges/modules/messages", default-features = false } pallet-bridge-parachains = { path = "../../../../../bridges/modules/parachains", default-features = false } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml index 4eb201eedc1..0c46e6c2e14 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml @@ -77,25 +77,25 @@ parachains-common = { path = "../../../common", default-features = false } testnet-parachains-constants = { path = "../../constants", default-features = false, features = ["westend"] } # Bridges -bp-asset-hub-rococo = { path = "../../../../../bridges/primitives/chain-asset-hub-rococo", default-features = false } -bp-asset-hub-westend = { path = "../../../../../bridges/primitives/chain-asset-hub-westend", default-features = false } -bp-bridge-hub-rococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-rococo", 
default-features = false } -bp-bridge-hub-westend = { path = "../../../../../bridges/primitives/chain-bridge-hub-westend", default-features = false } +bp-asset-hub-rococo = { path = "../../../../../bridges/chains/chain-asset-hub-rococo", default-features = false } +bp-asset-hub-westend = { path = "../../../../../bridges/chains/chain-asset-hub-westend", default-features = false } +bp-bridge-hub-rococo = { path = "../../../../../bridges/chains/chain-bridge-hub-rococo", default-features = false } +bp-bridge-hub-westend = { path = "../../../../../bridges/chains/chain-bridge-hub-westend", default-features = false } bp-header-chain = { path = "../../../../../bridges/primitives/header-chain", default-features = false } bp-messages = { path = "../../../../../bridges/primitives/messages", default-features = false } bp-parachains = { path = "../../../../../bridges/primitives/parachains", default-features = false } bp-polkadot-core = { path = "../../../../../bridges/primitives/polkadot-core", default-features = false } bp-relayers = { path = "../../../../../bridges/primitives/relayers", default-features = false } bp-runtime = { path = "../../../../../bridges/primitives/runtime", default-features = false } -bp-rococo = { path = "../../../../../bridges/primitives/chain-rococo", default-features = false } -bp-westend = { path = "../../../../../bridges/primitives/chain-westend", default-features = false } +bp-rococo = { path = "../../../../../bridges/chains/chain-rococo", default-features = false } +bp-westend = { path = "../../../../../bridges/chains/chain-westend", default-features = false } pallet-bridge-grandpa = { path = "../../../../../bridges/modules/grandpa", default-features = false } pallet-bridge-messages = { path = "../../../../../bridges/modules/messages", default-features = false } pallet-bridge-parachains = { path = "../../../../../bridges/modules/parachains", default-features = false } pallet-bridge-relayers = { path = "../../../../../bridges/modules/relayers", default-features = false } pallet-xcm-bridge-hub = { path = "../../../../../bridges/modules/xcm-bridge-hub", default-features = false } bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common", default-features = false } -bridge-hub-common = { path = "../../bridge-hubs/common", default-features = false } +bridge-hub-common = { path = "../common", default-features = false } [dev-dependencies] static_assertions = "1.1" diff --git a/cumulus/primitives/storage-weight-reclaim/Cargo.toml b/cumulus/primitives/storage-weight-reclaim/Cargo.toml index 4835fb5192b..73e0f03cd37 100644 --- a/cumulus/primitives/storage-weight-reclaim/Cargo.toml +++ b/cumulus/primitives/storage-weight-reclaim/Cargo.toml @@ -20,8 +20,8 @@ frame-system = { path = "../../../substrate/frame/system", default-features = fa sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } sp-std = { path = "../../../substrate/primitives/std", default-features = false } -cumulus-primitives-core = { path = "../../primitives/core", default-features = false } -cumulus-primitives-proof-size-hostfunction = { path = "../../primitives/proof-size-hostfunction", default-features = false } +cumulus-primitives-core = { path = "../core", default-features = false } +cumulus-primitives-proof-size-hostfunction = { path = "../proof-size-hostfunction", default-features = false } docify = "0.2.7" [dev-dependencies] diff --git a/polkadot/Cargo.toml b/polkadot/Cargo.toml index b0d71a18eaa..883568b23f7 100644 --- a/polkadot/Cargo.toml +++ 
b/polkadot/Cargo.toml @@ -46,7 +46,7 @@ assert_cmd = "2.0.4" nix = { version = "0.26.1", features = ["signal"] } tempfile = "3.2.0" tokio = "1.24.2" -substrate-rpc-client = { path = "../substrate/utils/frame/rpc/client/" } +substrate-rpc-client = { path = "../substrate/utils/frame/rpc/client" } polkadot-core-primitives = { path = "core-primitives" } [build-dependencies] diff --git a/polkadot/node/core/pvf/Cargo.toml b/polkadot/node/core/pvf/Cargo.toml index 9ed64b88ffd..6ad36a39be6 100644 --- a/polkadot/node/core/pvf/Cargo.toml +++ b/polkadot/node/core/pvf/Cargo.toml @@ -50,7 +50,7 @@ hex-literal = "0.4.1" polkadot-node-core-pvf-common = { path = "common", features = ["test-utils"] } # For benches and integration tests, depend on ourselves with the test-utils # feature. -polkadot-node-core-pvf = { path = ".", features = ["test-utils"] } +polkadot-node-core-pvf = { path = "", features = ["test-utils"] } rococo-runtime = { path = "../../../runtime/rococo" } adder = { package = "test-parachain-adder", path = "../../../parachain/test-parachains/adder" } diff --git a/templates/solochain/runtime/Cargo.toml b/templates/solochain/runtime/Cargo.toml index 33da035878a..90dd823eb64 100644 --- a/templates/solochain/runtime/Cargo.toml +++ b/templates/solochain/runtime/Cargo.toml @@ -62,7 +62,7 @@ sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool", sp-version = { path = "../../../substrate/primitives/version", default-features = false, features = [ "serde", ] } -sp-genesis-builder = { default-features = false, path = "../../../substrate/primitives/genesis-builder" } +sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } # RPC related frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false } -- GitLab From c6f7ccf51a54d0272643b2beaeab7458df8492e0 Mon Sep 17 00:00:00 2001 From: Alin Dima Date: Mon, 25 Mar 2024 15:38:13 +0200 Subject: [PATCH 025/128] elastic scaling: preserve candidate ordering in provisioner (#3778) https://github.com/paritytech/polkadot-sdk/issues/3742 --- polkadot/node/core/backing/src/error.rs | 6 +- polkadot/node/core/backing/src/lib.rs | 32 +- polkadot/node/core/backing/src/tests/mod.rs | 343 +++++++++++++++++- polkadot/node/core/provisioner/src/lib.rs | 61 ++-- polkadot/node/core/provisioner/src/tests.rs | 176 +++++++-- polkadot/node/overseer/src/tests.rs | 2 +- polkadot/node/subsystem-types/src/messages.rs | 11 +- .../src/types/overseer-protocol.md | 12 +- 8 files changed, 569 insertions(+), 74 deletions(-) diff --git a/polkadot/node/core/backing/src/error.rs b/polkadot/node/core/backing/src/error.rs index 64955a39396..52684f3fe30 100644 --- a/polkadot/node/core/backing/src/error.rs +++ b/polkadot/node/core/backing/src/error.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+use std::collections::HashMap; + use fatality::Nested; use futures::channel::{mpsc, oneshot}; @@ -24,7 +26,7 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_util::{runtime, Error as UtilError}; use polkadot_primitives::{BackedCandidate, ValidationCodeHash}; -use crate::LOG_TARGET; +use crate::{ParaId, LOG_TARGET}; pub type Result<T> = std::result::Result<T, Error>; pub type FatalResult<T> = std::result::Result<T, FatalError>; @@ -55,7 +57,7 @@ pub enum Error { InvalidSignature, #[error("Failed to send candidates {0:?}")] - Send(Vec<BackedCandidate>), + Send(HashMap<ParaId, Vec<BackedCandidate>>), #[error("FetchPoV failed")] FetchPoV, diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 69bf2e956a0..532ae2bd7cb 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -2231,15 +2231,16 @@ async fn handle_statement_message( fn handle_get_backed_candidates_message( state: &State, - requested_candidates: Vec<(CandidateHash, Hash)>, - tx: oneshot::Sender<Vec<BackedCandidate>>, + requested_candidates: HashMap<ParaId, Vec<(CandidateHash, Hash)>>, + tx: oneshot::Sender<HashMap<ParaId, Vec<BackedCandidate>>>, metrics: &Metrics, ) -> Result<(), Error> { let _timer = metrics.time_get_backed_candidates(); - let backed = requested_candidates - .into_iter() - .filter_map(|(candidate_hash, relay_parent)| { - let rp_state = match state.per_relay_parent.get(&relay_parent) { + let mut backed = HashMap::with_capacity(requested_candidates.len()); + + for (para_id, para_candidates) in requested_candidates { + for (candidate_hash, relay_parent) in para_candidates.iter() { + let rp_state = match state.per_relay_parent.get(relay_parent) { Some(rp_state) => rp_state, None => { @@ -2249,13 +2250,13 @@ fn handle_get_backed_candidates_message( ?candidate_hash, "Requested candidate's relay parent is out of view", ); - return None + break }, }; - rp_state + let maybe_backed_candidate = rp_state .table .attested_candidate( - &candidate_hash, + candidate_hash, &rp_state.table_context, rp_state.minimum_backing_votes, ) @@ -2265,9 +2266,18 @@ fn handle_get_backed_candidates_message( &rp_state.table_context, rp_state.inject_core_index, ) - }) - }) - .collect(); + }); + + if let Some(backed_candidate) = maybe_backed_candidate { + backed + .entry(para_id) + .or_insert_with(|| Vec::with_capacity(para_candidates.len())) + .push(backed_candidate); + } else { + break + } + } + } tx.send(backed).map_err(|data| Error::Send(data))?; Ok(()) diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index e3cc5727435..fdb47581ea3 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -663,13 +663,19 @@ fn backing_works(#[case] elastic_scaling_mvp: bool) { let (tx, rx) = oneshot::channel(); let msg = CandidateBackingMessage::GetBackedCandidates( - vec![(candidate_a_hash, test_state.relay_parent)], + std::iter::once(( + test_state.chain_ids[0], + vec![(candidate_a_hash, test_state.relay_parent)], + )) + .collect(), tx, ); virtual_overseer.send(FromOrchestra::Communication { msg }).await; - let candidates = rx.await.unwrap(); + let mut candidates = rx.await.unwrap(); + assert_eq!(1, candidates.len()); + let candidates = candidates.remove(&test_state.chain_ids[0]).unwrap(); assert_eq!(1, candidates.len()); assert_eq!(candidates[0].validity_votes().len(), 3); @@ -695,6 +701,323 @@ fn backing_works(#[case] elastic_scaling_mvp: bool) { }); } +#[test] +fn get_backed_candidate_preserves_order() { + let mut test_state = TestState::default(); + test_state + .node_features + .resize((node_features::FeatureIndex::ElasticScalingMVP as u8 + 1) as usize, false); + test_state
.node_features + .set(node_features::FeatureIndex::ElasticScalingMVP as u8 as usize, true); + + // Set a single validator as the first validator group. It simplifies the test. + test_state.validator_groups.0[0] = vec![ValidatorIndex(2)]; + // Add another validator group for the third core. + test_state.validator_groups.0.push(vec![ValidatorIndex(3)]); + // Assign the second core to the same para as the first one. + test_state.availability_cores[1] = + CoreState::Scheduled(ScheduledCore { para_id: test_state.chain_ids[0], collator: None }); + // Add another availability core for paraid 2. + test_state.availability_cores.push(CoreState::Scheduled(ScheduledCore { + para_id: test_state.chain_ids[1], + collator: None, + })); + + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + test_startup(&mut virtual_overseer, &test_state).await; + + let pov_a = PoV { block_data: BlockData(vec![1, 2, 3]) }; + let pov_b = PoV { block_data: BlockData(vec![3, 4, 5]) }; + let pov_c = PoV { block_data: BlockData(vec![5, 6, 7]) }; + let validation_code_ab = ValidationCode(vec![1, 2, 3]); + let validation_code_c = ValidationCode(vec![4, 5, 6]); + + let parent_head_data_a = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); + let parent_head_data_b = { + let mut head = parent_head_data_a.clone(); + head.0[0] = 98; + head + }; + let output_head_data_b = { + let mut head = parent_head_data_a.clone(); + head.0[0] = 99; + head + }; + let parent_head_data_c = test_state.head_data.get(&test_state.chain_ids[1]).unwrap(); + let output_head_data_c = { + let mut head = parent_head_data_c.clone(); + head.0[0] = 97; + head + }; + + let pvd_a = PersistedValidationData { + parent_head: parent_head_data_a.clone(), + relay_parent_number: 0_u32.into(), + max_pov_size: 1024, + relay_parent_storage_root: dummy_hash(), + }; + let pvd_b = PersistedValidationData { + parent_head: parent_head_data_b.clone(), + relay_parent_number: 0_u32.into(), + max_pov_size: 1024, + relay_parent_storage_root: dummy_hash(), + }; + let pvd_c = PersistedValidationData { + parent_head: parent_head_data_c.clone(), + relay_parent_number: 0_u32.into(), + max_pov_size: 1024, + relay_parent_storage_root: dummy_hash(), + }; + + let candidate_a = TestCandidateBuilder { + para_id: test_state.chain_ids[0], + relay_parent: test_state.relay_parent, + pov_hash: pov_a.hash(), + head_data: parent_head_data_b.clone(), + erasure_root: make_erasure_root(&test_state, pov_a.clone(), pvd_a.clone()), + validation_code: validation_code_ab.0.clone(), + persisted_validation_data_hash: pvd_a.hash(), + } + .build(); + let candidate_b = TestCandidateBuilder { + para_id: test_state.chain_ids[0], + relay_parent: test_state.relay_parent, + pov_hash: pov_b.hash(), + head_data: output_head_data_b.clone(), + erasure_root: make_erasure_root(&test_state, pov_b.clone(), pvd_b.clone()), + validation_code: validation_code_ab.0.clone(), + persisted_validation_data_hash: pvd_b.hash(), + } + .build(); + let candidate_c = TestCandidateBuilder { + para_id: test_state.chain_ids[1], + relay_parent: test_state.relay_parent, + pov_hash: pov_c.hash(), + head_data: output_head_data_c.clone(), + erasure_root: make_erasure_root(&test_state, pov_b.clone(), pvd_c.clone()), + validation_code: validation_code_c.0.clone(), + persisted_validation_data_hash: pvd_c.hash(), + } + .build(); + let candidate_a_hash = candidate_a.hash(); + let candidate_b_hash = candidate_b.hash(); + let candidate_c_hash = candidate_c.hash(); + + // Back a chain of two candidates for the first 
paraid. Back one candidate for the second + // paraid. + for (candidate, pvd, validator_index) in [ + (candidate_a, pvd_a, ValidatorIndex(2)), + (candidate_b, pvd_b, ValidatorIndex(1)), + (candidate_c, pvd_c, ValidatorIndex(3)), + ] { + let public = Keystore::sr25519_generate_new( + &*test_state.keystore, + ValidatorId::ID, + Some(&test_state.validators[validator_index.0 as usize].to_seed()), + ) + .expect("Insert key into keystore"); + + let signed = SignedFullStatementWithPVD::sign( + &test_state.keystore, + StatementWithPVD::Seconded(candidate.clone(), pvd.clone()), + &test_state.signing_context, + validator_index, + &public.into(), + ) + .ok() + .flatten() + .expect("should be signed"); + + let statement = + CandidateBackingMessage::Statement(test_state.relay_parent, signed.clone()); + + virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::Provisioner( + ProvisionerMessage::ProvisionableData( + _, + ProvisionableData::BackedCandidate(candidate_receipt) + ) + ) => { + assert_eq!(candidate_receipt, candidate.to_plain()); + } + ); + } + + // Happy case, all candidates should be present. + let (tx, rx) = oneshot::channel(); + let msg = CandidateBackingMessage::GetBackedCandidates( + [ + ( + test_state.chain_ids[0], + vec![ + (candidate_a_hash, test_state.relay_parent), + (candidate_b_hash, test_state.relay_parent), + ], + ), + (test_state.chain_ids[1], vec![(candidate_c_hash, test_state.relay_parent)]), + ] + .into_iter() + .collect(), + tx, + ); + virtual_overseer.send(FromOrchestra::Communication { msg }).await; + let mut candidates = rx.await.unwrap(); + assert_eq!(2, candidates.len()); + assert_eq!( + candidates + .remove(&test_state.chain_ids[0]) + .unwrap() + .iter() + .map(|c| c.hash()) + .collect::<Vec<_>>(), + vec![candidate_a_hash, candidate_b_hash] + ); + assert_eq!( + candidates + .remove(&test_state.chain_ids[1]) + .unwrap() + .iter() + .map(|c| c.hash()) + .collect::<Vec<_>>(), + vec![candidate_c_hash] + ); + + // The first candidate of the first para is invalid (we supply the wrong relay parent or a + // wrong candidate hash). No candidates should be returned for paraid 1. ParaId 2 should be + // fine. + for candidates in [ + vec![ + (candidate_a_hash, Hash::repeat_byte(9)), + (candidate_b_hash, test_state.relay_parent), + ], + vec![ + (CandidateHash(Hash::repeat_byte(9)), test_state.relay_parent), + (candidate_b_hash, test_state.relay_parent), + ], + ] { + let (tx, rx) = oneshot::channel(); + let msg = CandidateBackingMessage::GetBackedCandidates( + [ + (test_state.chain_ids[0], candidates), + (test_state.chain_ids[1], vec![(candidate_c_hash, test_state.relay_parent)]), + ] + .into_iter() + .collect(), + tx, + ); + virtual_overseer.send(FromOrchestra::Communication { msg }).await; + let mut candidates = rx.await.unwrap(); + assert_eq!(candidates.len(), 1); + + assert!(candidates.remove(&test_state.chain_ids[0]).is_none()); + assert_eq!( + candidates + .remove(&test_state.chain_ids[1]) + .unwrap() + .iter() + .map(|c| c.hash()) + .collect::<Vec<_>>(), + vec![candidate_c_hash] + ); + } + + // The second candidate of the first para is invalid (we supply the wrong relay parent or a + // wrong candidate hash). The first candidate of the first para should still be present. + // ParaId 2 is fine.
+ for candidates in [ + vec![ + (candidate_a_hash, test_state.relay_parent), + (candidate_b_hash, Hash::repeat_byte(9)), + ], + vec![ + (candidate_a_hash, test_state.relay_parent), + (CandidateHash(Hash::repeat_byte(9)), test_state.relay_parent), + ], + ] { + let (tx, rx) = oneshot::channel(); + let msg = CandidateBackingMessage::GetBackedCandidates( + [ + (test_state.chain_ids[0], candidates), + (test_state.chain_ids[1], vec![(candidate_c_hash, test_state.relay_parent)]), + ] + .into_iter() + .collect(), + tx, + ); + virtual_overseer.send(FromOrchestra::Communication { msg }).await; + let mut candidates = rx.await.unwrap(); + assert_eq!(2, candidates.len()); + assert_eq!( + candidates + .remove(&test_state.chain_ids[0]) + .unwrap() + .iter() + .map(|c| c.hash()) + .collect::<Vec<_>>(), + vec![candidate_a_hash] + ); + assert_eq!( + candidates + .remove(&test_state.chain_ids[1]) + .unwrap() + .iter() + .map(|c| c.hash()) + .collect::<Vec<_>>(), + vec![candidate_c_hash] + ); + } + + // Both candidates of para id 1 are invalid (we supply the wrong relay parent or a wrong + // candidate hash). No candidates should be returned for para id 1. Para Id 2 is fine. + for candidates in [ + vec![ + (CandidateHash(Hash::repeat_byte(9)), test_state.relay_parent), + (CandidateHash(Hash::repeat_byte(10)), test_state.relay_parent), + ], + vec![ + (candidate_a_hash, Hash::repeat_byte(9)), + (candidate_b_hash, Hash::repeat_byte(10)), + ], + ] { + let (tx, rx) = oneshot::channel(); + let msg = CandidateBackingMessage::GetBackedCandidates( + [ + (test_state.chain_ids[0], candidates), + (test_state.chain_ids[1], vec![(candidate_c_hash, test_state.relay_parent)]), + ] + .into_iter() + .collect(), + tx, + ); + virtual_overseer.send(FromOrchestra::Communication { msg }).await; + let mut candidates = rx.await.unwrap(); + assert_eq!(candidates.len(), 1); + + assert!(candidates.remove(&test_state.chain_ids[0]).is_none()); + assert_eq!( + candidates + .remove(&test_state.chain_ids[1]) + .unwrap() + .iter() + .map(|c| c.hash()) + .collect::<Vec<_>>(), + vec![candidate_c_hash] + ); + } + + virtual_overseer + .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves( + ActiveLeavesUpdate::stop_work(test_state.relay_parent), + ))) + .await; + virtual_overseer + }); +} + #[test] fn extract_core_index_from_statement_works() { let test_state = TestState::default(); @@ -950,13 +1273,19 @@ fn backing_works_while_validation_ongoing() { let (tx, rx) = oneshot::channel(); let msg = CandidateBackingMessage::GetBackedCandidates( - vec![(candidate_a.hash(), test_state.relay_parent)], + std::iter::once(( + test_state.chain_ids[0], + vec![(candidate_a.hash(), test_state.relay_parent)], + )) + .collect(), tx, ); virtual_overseer.send(FromOrchestra::Communication { msg }).await; - let candidates = rx.await.unwrap(); + let mut candidates = rx.await.unwrap(); + assert_eq!(candidates.len(), 1); + let candidates = candidates.remove(&test_state.chain_ids[0]).unwrap(); assert_eq!(1, candidates.len()); assert_eq!(candidates[0].validity_votes().len(), 3); @@ -1565,7 +1894,11 @@ fn backing_works_after_failed_validation() { // and check that it is still alive.
let (tx, rx) = oneshot::channel(); let msg = CandidateBackingMessage::GetBackedCandidates( - vec![(candidate.hash(), test_state.relay_parent)], + std::iter::once(( + test_state.chain_ids[0], + vec![(candidate.hash(), test_state.relay_parent)], + )) + .collect(), tx, ); diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs index c9ed873d3c2..3ccf499f325 100644 --- a/polkadot/node/core/provisioner/src/lib.rs +++ b/polkadot/node/core/provisioner/src/lib.rs @@ -46,7 +46,7 @@ use polkadot_primitives::{ BackedCandidate, BlockNumber, CandidateHash, CandidateReceipt, CoreIndex, CoreState, Hash, Id as ParaId, OccupiedCoreAssumption, SessionIndex, SignedAvailabilityBitfield, ValidatorIndex, }; -use std::collections::{BTreeMap, HashMap, HashSet}; +use std::collections::{BTreeMap, HashMap}; mod disputes; mod error; @@ -598,13 +598,11 @@ async fn select_candidate_hashes_from_tracked( candidates: &[CandidateReceipt], relay_parent: Hash, sender: &mut impl overseer::ProvisionerSenderTrait, -) -> Result<Vec<(CandidateHash, Hash)>, Error> { +) -> Result<HashMap<ParaId, Vec<(CandidateHash, Hash)>>, Error> { let block_number = get_block_number_under_construction(relay_parent, sender).await?; let mut selected_candidates = - Vec::with_capacity(candidates.len().min(availability_cores.len())); - let mut selected_parachains = - HashSet::with_capacity(candidates.len().min(availability_cores.len())); + HashMap::with_capacity(candidates.len().min(availability_cores.len())); gum::debug!( target: LOG_TARGET, @@ -638,7 +636,7 @@ async fn select_candidate_hashes_from_tracked( CoreState::Free => continue, }; - if selected_parachains.contains(&scheduled_core.para_id) { + if selected_candidates.contains_key(&scheduled_core.para_id) { // We already picked a candidate for this parachain. Elastic scaling only works with // prospective parachains mode. continue @@ -677,8 +675,10 @@ async fn select_candidate_hashes_from_tracked( "Selected candidate receipt", ); - selected_parachains.insert(candidate.descriptor.para_id); - selected_candidates.push((candidate_hash, candidate.descriptor.relay_parent)); + selected_candidates.insert( + candidate.descriptor.para_id, + vec![(candidate_hash, candidate.descriptor.relay_parent)], + ); } } @@ -695,12 +695,12 @@ async fn request_backable_candidates( bitfields: &[SignedAvailabilityBitfield], relay_parent: Hash, sender: &mut impl overseer::ProvisionerSenderTrait, -) -> Result<Vec<(CandidateHash, Hash)>, Error> { +) -> Result<HashMap<ParaId, Vec<(CandidateHash, Hash)>>, Error> { let block_number = get_block_number_under_construction(relay_parent, sender).await?; // Record how many cores are scheduled for each paraid. Use a BTreeMap because // we'll need to iterate through them. - let mut scheduled_cores: BTreeMap<ParaId, usize> = BTreeMap::new(); + let mut scheduled_cores_per_para: BTreeMap<ParaId, usize> = BTreeMap::new(); // The on-chain ancestors of a para present in availability-cores. let mut ancestors: HashMap<ParaId, Ancestors> = HashMap::with_capacity(availability_cores.len()); @@ -709,7 +709,7 @@ async fn request_backable_candidates( let core_idx = CoreIndex(core_idx as u32); match core { CoreState::Scheduled(scheduled_core) => { - *scheduled_cores.entry(scheduled_core.para_id).or_insert(0) += 1; + *scheduled_cores_per_para.entry(scheduled_core.para_id).or_insert(0) += 1; }, CoreState::Occupied(occupied_core) => { let is_available = bitfields_indicate_availability( @@ -726,14 +726,14 @@ async fn request_backable_candidates( if let Some(ref scheduled_core) = occupied_core.next_up_on_available { // Request a new backable candidate for the newly scheduled para id.
- *scheduled_cores.entry(scheduled_core.para_id).or_insert(0) += 1; + *scheduled_cores_per_para.entry(scheduled_core.para_id).or_insert(0) += 1; } } else if occupied_core.time_out_at <= block_number { // Timed out before being available. if let Some(ref scheduled_core) = occupied_core.next_up_on_time_out { // Candidate's availability timed out, practically same as scheduled. - *scheduled_cores.entry(scheduled_core.para_id).or_insert(0) += 1; + *scheduled_cores_per_para.entry(scheduled_core.para_id).or_insert(0) += 1; } } else { // Not timed out and not available. @@ -747,10 +747,10 @@ async fn request_backable_candidates( }; } - let mut selected_candidates: Vec<(CandidateHash, Hash)> = - Vec::with_capacity(availability_cores.len()); + let mut selected_candidates: HashMap<ParaId, Vec<(CandidateHash, Hash)>> = + HashMap::with_capacity(scheduled_cores_per_para.len()); - for (para_id, core_count) in scheduled_cores { + for (para_id, core_count) in scheduled_cores_per_para { let para_ancestors = ancestors.remove(&para_id).unwrap_or_default(); // If elastic scaling MVP is disabled, only allow one candidate per parachain. @@ -777,7 +777,7 @@ async fn request_backable_candidates( continue } - selected_candidates.extend(response.into_iter().take(core_count)); + selected_candidates.insert(para_id, response); } Ok(selected_candidates) @@ -826,33 +826,38 @@ async fn select_candidates( selected_candidates.clone(), tx, )); - let mut candidates = rx.await.map_err(|err| Error::CanceledBackedCandidates(err))?; + let candidates = rx.await.map_err(|err| Error::CanceledBackedCandidates(err))?; gum::trace!(target: LOG_TARGET, leaf_hash=?relay_parent, "Got {} backed candidates", candidates.len()); // keep only one candidate with validation code. let mut with_validation_code = false; - candidates.retain(|c| { - if c.candidate().commitments.new_validation_code.is_some() { - if with_validation_code { - return false + // merge the candidates into a common collection, preserving the order + let mut merged_candidates = Vec::with_capacity(availability_cores.len()); + + for para_candidates in candidates.into_values() { + for candidate in para_candidates { + if candidate.candidate().commitments.new_validation_code.is_some() { + if with_validation_code { + break + } else { + with_validation_code = true; + } } - with_validation_code = true; + merged_candidates.push(candidate); } - - true - }); + } gum::debug!( target: LOG_TARGET, - n_candidates = candidates.len(), + n_candidates = merged_candidates.len(), n_cores = availability_cores.len(), ?relay_parent, "Selected backed candidates", ); - Ok(candidates) + Ok(merged_candidates) } /// Produces a block number 1 higher than that of the relay parent diff --git a/polkadot/node/core/provisioner/src/tests.rs b/polkadot/node/core/provisioner/src/tests.rs index bdb4f85f400..823b1d86e46 100644 --- a/polkadot/node/core/provisioner/src/tests.rs +++ b/polkadot/node/core/provisioner/src/tests.rs @@ -258,6 +258,8 @@ mod select_candidates { BlockNumber, CandidateCommitments, CommittedCandidateReceipt, PersistedValidationData, }; use rstest::rstest; + use std::ops::Not; + use CoreState::{Free, Scheduled}; const BLOCK_UNDER_PRODUCTION: BlockNumber = 128; @@ -323,9 +325,6 @@ mod select_candidates { // 11: Occupied(next_up_on_available and available, but different successor para_id) // ] fn mock_availability_cores_one_per_para() -> Vec<CoreState> { - use std::ops::Not; - use CoreState::{Free, Scheduled}; - vec![ // 0: Free, Free, @@ -389,9 +388,6 @@ mod select_candidates { // For test purposes with multiple possible cores assigned to a
para, we always return this set // of availability cores: fn mock_availability_cores_multiple_per_para() -> Vec<CoreState> { - use std::ops::Not; - use CoreState::{Free, Scheduled}; - vec![ // 0: Free, Free, @@ -562,7 +558,10 @@ mod select_candidates { use ChainApiMessage::BlockNumber; use RuntimeApiMessage::Request; - let mut backed_iter = expected.clone().into_iter(); + let mut backed = expected.clone().into_iter().fold(HashMap::new(), |mut acc, candidate| { + acc.entry(candidate.descriptor().para_id).or_insert(vec![]).push(candidate); + acc + }); expected.sort_by_key(|c| c.candidate().descriptor.para_id); let mut candidates_iter = expected .iter() @@ -583,11 +582,30 @@ mod select_candidates { hashes, sender, )) => { - let response: Vec<BackedCandidate> = - backed_iter.by_ref().take(hashes.len()).collect(); - let expected_hashes: Vec<(CandidateHash, Hash)> = response + let mut response: HashMap<ParaId, Vec<BackedCandidate>> = HashMap::new(); + for (para_id, requested_candidates) in hashes.clone() { + response.insert( + para_id, + backed + .get_mut(&para_id) + .unwrap() + .drain(0..requested_candidates.len()) + .collect(), + ); + } + let expected_hashes: HashMap<ParaId, Vec<(CandidateHash, Hash)>> = response .iter() - .map(|candidate| (candidate.hash(), candidate.descriptor().relay_parent)) + .map(|(para_id, candidates)| { + ( + *para_id, + candidates + .iter() + .map(|candidate| { + (candidate.hash(), candidate.descriptor().relay_parent) + }) + .collect(), + ) + }) .collect(); assert_eq!(expected_hashes, hashes); @@ -768,7 +786,7 @@ mod select_candidates { #[rstest] #[case(ProspectiveParachainsMode::Disabled)] #[case(ProspectiveParachainsMode::Enabled {max_candidate_depth: 0, allowed_ancestry_len: 0})] - fn selects_max_one_code_upgrade( + fn selects_max_one_code_upgrade_one_core_per_para( #[case] prospective_parachains_mode: ProspectiveParachainsMode, ) { let mock_cores = mock_availability_cores_one_per_para(); @@ -780,7 +798,12 @@ mod select_candidates { let cores = [1, 4, 7, 8, 10, 12]; let cores_with_code = [1, 4, 8]; - let expected_cores = [1, 7, 10, 12]; + // We can't be sure which one code upgrade the provisioner will pick. We can only assert + // that it only picks one. These are the possible cores for which the provisioner will + // supply candidates. + // There are multiple possibilities depending on which code upgrade it + // chooses. + let possible_expected_cores = [[1, 7, 10, 12], [4, 7, 10, 12], [7, 8, 10, 12]]; let committed_receipts: Vec<_> = (0..=mock_cores.len()) .map(|i| { @@ -820,8 +843,10 @@ mod select_candidates { // Then, some of them get filtered due to new validation code rule.
let expected_backed: Vec<_> = cores.iter().map(|&idx| backed_candidates[idx].clone()).collect(); - let expected_backed_filtered: Vec<_> = - expected_cores.iter().map(|&idx| candidates[idx].clone()).collect(); + let expected_backed_filtered: Vec<Vec<_>> = possible_expected_cores + .iter() + .map(|indices| indices.iter().map(|&idx| candidates[idx].clone()).collect()) + .collect(); let mock_cores_clone = mock_cores.clone(); @@ -850,13 +875,120 @@ mod select_candidates { assert_eq!(result.len(), 4); - result.into_iter().for_each(|c| { - assert!( - expected_backed_filtered.iter().any(|c2| c.candidate().corresponds_to(c2)), - "Failed to find candidate: {:?}", - c, - ) - }); + assert!(expected_backed_filtered.iter().any(|expected_backed_filtered| { + result.clone().into_iter().all(|c| { + expected_backed_filtered.iter().any(|c2| c.candidate().corresponds_to(c2)) + }) + })); + }, + ) + } + + #[test] + fn selects_max_one_code_upgrade_multiple_cores_per_para() { + let prospective_parachains_mode = + ProspectiveParachainsMode::Enabled { max_candidate_depth: 0, allowed_ancestry_len: 0 }; + let mock_cores = vec![ + // 0: Scheduled(default), + Scheduled(scheduled_core(1)), + // 1: Scheduled(default), + Scheduled(scheduled_core(2)), + // 2: Scheduled(default), + Scheduled(scheduled_core(2)), + // 3: Scheduled(default), + Scheduled(scheduled_core(2)), + // 4: Scheduled(default), + Scheduled(scheduled_core(3)), + // 5: Scheduled(default), + Scheduled(scheduled_core(3)), + // 6: Scheduled(default), + Scheduled(scheduled_core(3)), + ]; + + let empty_hash = PersistedValidationData::<Hash, BlockNumber>::default().hash(); + let cores_with_code = [0, 2, 4, 5]; + + // We can't be sure which one code upgrade the provisioner will pick. We can only assert + // that it only picks one. + // These are the possible cores for which the provisioner will + // supply candidates. There are multiple possibilities depending on which code upgrade it + // chooses. + let possible_expected_cores = [vec![0, 1], vec![1, 2, 3], vec![4, 1]]; + + let committed_receipts: Vec<_> = (0..mock_cores.len()) + .map(|i| { + let mut descriptor = dummy_candidate_descriptor(dummy_hash()); + descriptor.para_id = mock_cores[i].para_id().unwrap(); + descriptor.persisted_validation_data_hash = empty_hash; + descriptor.pov_hash = Hash::from_low_u64_be(i as u64); + CommittedCandidateReceipt { + descriptor, + commitments: CandidateCommitments { + new_validation_code: if cores_with_code.contains(&i) { + Some(vec![].into()) + } else { + None + }, + ..Default::default() + }, + } + }) + .collect(); + + // Input to select_candidates + let candidates: Vec<_> = committed_receipts.iter().map(|r| r.to_plain()).collect(); + // Build possible outputs from select_candidates + let backed_candidates: Vec<_> = committed_receipts + .iter() + .map(|committed_receipt| { + BackedCandidate::new( + committed_receipt.clone(), + Vec::new(), + default_bitvec(MOCK_GROUP_SIZE), + None, + ) + }) + .collect(); + + // First, provisioner will request backable candidates for each scheduled core. + // Then, some of them get filtered due to new validation code rule.
+ let expected_backed: Vec<_> = + (0..mock_cores.len()).map(|idx| backed_candidates[idx].clone()).collect(); + let expected_backed_filtered: Vec<Vec<_>> = possible_expected_cores + .iter() + .map(|indices| indices.iter().map(|&idx| candidates[idx].clone()).collect()) + .collect(); + + let mock_cores_clone = mock_cores.clone(); + + test_harness( + |r| { + mock_overseer( + r, + mock_cores_clone, + expected_backed, + HashMap::new(), + prospective_parachains_mode, + ) + }, + |mut tx: TestSubsystemSender| async move { + let result = select_candidates( + &mock_cores, + &[], + &candidates, + prospective_parachains_mode, + true, + Default::default(), + &mut tx, + ) + .await + .unwrap(); + + assert!(expected_backed_filtered.iter().any(|expected_backed_filtered| { + result.clone().into_iter().all(|c| { + expected_backed_filtered.iter().any(|c2| c.candidate().corresponds_to(c2)) + }) && (expected_backed_filtered.len() == result.len()) + })); + }, + ) + } diff --git a/polkadot/node/overseer/src/tests.rs b/polkadot/node/overseer/src/tests.rs index 0494274367d..55a6bdb74ba 100644 --- a/polkadot/node/overseer/src/tests.rs +++ b/polkadot/node/overseer/src/tests.rs @@ -811,7 +811,7 @@ fn test_candidate_validation_msg() -> CandidateValidationMessage { fn test_candidate_backing_msg() -> CandidateBackingMessage { let (sender, _) = oneshot::channel(); - CandidateBackingMessage::GetBackedCandidates(Vec::new(), sender) + CandidateBackingMessage::GetBackedCandidates(Default::default(), sender) } fn test_chain_api_msg() -> ChainApiMessage { diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index 5115efa853c..92c35d1b7b9 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -82,8 +82,15 @@ pub struct CanSecondRequest { pub enum CandidateBackingMessage { /// Requests a set of backable candidates attested by the subsystem. /// - /// Each pair is (candidate_hash, candidate_relay_parent). - GetBackedCandidates(Vec<(CandidateHash, Hash)>, oneshot::Sender<Vec<BackedCandidate>>), + /// The order of candidates of the same para must be preserved in the response. + /// If a backed candidate of a para cannot be retrieved, the response should not contain any + /// candidates of the same para that follow it in the input vector. In other words, assuming + /// candidates are supplied in dependency order, we must ensure that this dependency order is + /// preserved. + GetBackedCandidates( + HashMap<ParaId, Vec<(CandidateHash, Hash)>>, + oneshot::Sender<HashMap<ParaId, Vec<BackedCandidate>>>, + ), /// Request the subsystem to check whether it's allowed to second given candidate. /// The rule is to only fetch collations that are either built on top of the root /// of some fragment tree or have a parent node which represents backed candidate. diff --git a/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md b/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md index 54cdc2edd12..acfe309ba7b 100644 --- a/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md +++ b/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md @@ -340,9 +340,15 @@ enum BitfieldSigningMessage { } ```rust enum CandidateBackingMessage { /// Requests a set of backable candidates attested by the subsystem. - /// - /// Each pair is (candidate_hash, candidate_relay_parent). - GetBackedCandidates(Vec<(CandidateHash, Hash)>, oneshot::Sender<Vec<BackedCandidate>>), + /// The order of candidates of the same para must be preserved in the response.
+ /// If a backed candidate of a para cannot be retrieved, the response should not contain any + /// candidates of the same para that follow it in the input vector. In other words, assuming + /// candidates are supplied in dependency order, we must ensure that this dependency order is + /// preserved. + GetBackedCandidates( + HashMap>, + oneshot::Sender>>, + ), /// Note that the Candidate Backing subsystem should second the given candidate in the context of the /// given relay-parent (ref. by hash). This candidate must be validated using the provided PoV. /// The PoV is expected to match the `pov_hash` in the descriptor. -- GitLab From ce7613a49ff15652c6b33927f2ac64a64ba09783 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Mon, 25 Mar 2024 16:24:58 +0200 Subject: [PATCH 026/128] =?UTF-8?q?[prdoc]=C2=A0Remove=20default=20audienc?= =?UTF-8?q?e=20(#3723)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Devs seem to not realize that this should be filled out manually. The default is also often wrong. --- prdoc/.template.prdoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/prdoc/.template.prdoc b/prdoc/.template.prdoc index 097741f388c..03a458876df 100644 --- a/prdoc/.template.prdoc +++ b/prdoc/.template.prdoc @@ -4,7 +4,7 @@ title: ... doc: - - audience: Node Dev + - audience: ... description: | ... -- GitLab From 9d122401f1d434235dfc08ee0509ab6e921bb286 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Mon, 25 Mar 2024 15:26:38 +0000 Subject: [PATCH 027/128] Refactor crypto-related traits implementations in term of Public/Signature Bytes (#3806) Another simple refactory to prune some duplicate code Follow up of: https://github.com/paritytech/polkadot-sdk/pull/3684 --- .../primitives/application-crypto/src/lib.rs | 14 +- substrate/primitives/core/src/bandersnatch.rs | 55 +------ substrate/primitives/core/src/bls.rs | 111 +------------ substrate/primitives/core/src/crypto.rs | 115 ++++--------- substrate/primitives/core/src/crypto_bytes.rs | 153 ++++++++++++++++-- substrate/primitives/core/src/ecdsa.rs | 92 +---------- substrate/primitives/core/src/ed25519.rs | 111 +------------ substrate/primitives/core/src/lib.rs | 20 +-- .../primitives/core/src/paired_crypto.rs | 144 +---------------- substrate/primitives/core/src/sr25519.rs | 95 ++++------- .../primitives/statement-store/src/ecies.rs | 2 +- 11 files changed, 254 insertions(+), 658 deletions(-) diff --git a/substrate/primitives/application-crypto/src/lib.rs b/substrate/primitives/application-crypto/src/lib.rs index ea2e5a83127..2355f1ba527 100644 --- a/substrate/primitives/application-crypto/src/lib.rs +++ b/substrate/primitives/application-crypto/src/lib.rs @@ -26,7 +26,7 @@ pub use sp_core::crypto::{DeriveError, Pair, SecretStringError}; #[doc(hidden)] pub use sp_core::{ self, - crypto::{ByteArray, CryptoType, Derive, IsWrappedBy, Public, UncheckedFrom, Wraps}, + crypto::{ByteArray, CryptoType, Derive, IsWrappedBy, Public, Signature, UncheckedFrom, Wraps}, RuntimeDebug, }; @@ -505,6 +505,12 @@ macro_rules! app_crypto_signature_common { } } + impl AsMut<[u8]> for Signature { + fn as_mut(&mut self) -> &mut [u8] { + self.0.as_mut() + } + } + impl $crate::AppSignature for Signature { type Generic = $sig; } @@ -525,6 +531,12 @@ macro_rules! app_crypto_signature_common { } } + impl $crate::Signature for Signature {} + + impl $crate::ByteArray for Signature { + const LEN: usize = <$sig>::LEN; + } + impl Signature { /// Convert into wrapped generic signature type. 
pub fn into_inner(self) -> $sig { diff --git a/substrate/primitives/core/src/bandersnatch.rs b/substrate/primitives/core/src/bandersnatch.rs index c9d5f27b47b..42c8b293634 100644 --- a/substrate/primitives/core/src/bandersnatch.rs +++ b/substrate/primitives/core/src/bandersnatch.rs @@ -20,19 +20,12 @@ //! //! The primitive can operate both as a regular VRF or as an anonymized Ring VRF. -#[cfg(feature = "serde")] -use crate::crypto::Ss58Codec; #[cfg(feature = "full_crypto")] use crate::crypto::VrfSecret; use crate::crypto::{ - ByteArray, CryptoType, CryptoTypeId, Derive, DeriveError, DeriveJunction, Pair as TraitPair, - Public as TraitPublic, PublicBytes, SecretStringError, SignatureBytes, UncheckedFrom, - VrfPublic, + ByteArray, CryptoType, CryptoTypeId, DeriveError, DeriveJunction, Pair as TraitPair, + PublicBytes, SecretStringError, SignatureBytes, UncheckedFrom, VrfPublic, }; -#[cfg(feature = "serde")] -use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; -#[cfg(all(not(feature = "std"), feature = "serde"))] -use sp_std::alloc::{format, string::String}; use bandersnatch_vrfs::{CanonicalSerialize, SecretKey}; use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; @@ -64,42 +57,10 @@ pub struct BandersnatchTag; /// Bandersnatch public key. pub type Public = PublicBytes; -impl TraitPublic for Public {} - impl CryptoType for Public { type Pair = Pair; } -impl Derive for Public {} - -impl sp_std::fmt::Debug for Public { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - let s = self.to_ss58check(); - write!(f, "{} ({}...)", crate::hexdisplay::HexDisplay::from(&self.as_ref()), &s[0..8]) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } -} - -#[cfg(feature = "serde")] -impl Serialize for Public { - fn serialize(&self, serializer: S) -> Result { - serializer.serialize_str(&self.to_ss58check()) - } -} - -#[cfg(feature = "serde")] -impl<'de> Deserialize<'de> for Public { - fn deserialize>(deserializer: D) -> Result { - Public::from_ss58check(&String::deserialize(deserializer)?) - .map_err(|e| de::Error::custom(format!("{:?}", e))) - } -} - /// Bandersnatch signature. /// /// The signature is created via the [`VrfSecret::vrf_sign`] using [`SIGNING_CTX`] as transcript @@ -110,18 +71,6 @@ impl CryptoType for Signature { type Pair = Pair; } -impl sp_std::fmt::Debug for Signature { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - write!(f, "{}", crate::hexdisplay::HexDisplay::from(&self.0)) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } -} - /// The raw secret seed, which can be used to reconstruct the secret [`Pair`]. type Seed = [u8; SEED_SERIALIZED_SIZE]; diff --git a/substrate/primitives/core/src/bls.rs b/substrate/primitives/core/src/bls.rs index 9492a14ff0d..bb04babb3f1 100644 --- a/substrate/primitives/core/src/bls.rs +++ b/substrate/primitives/core/src/bls.rs @@ -23,17 +23,11 @@ //! Chaum-Pedersen proof uses the same hash-to-field specified in RFC 9380 for the field of the BLS //! curve. 
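The bandersnatch hunks above are representative of the whole refactor: per-scheme `Debug`, `Display`, serde and SS58 impls are deleted because the generic `PublicBytes`/`SignatureBytes` wrappers now provide them. Under that assumption, a hypothetical new scheme only needs a tag type and two aliases; a sketch (names and lengths are made up for illustration):

```rust
// Post-refactor pattern: a zero-sized tag keeps byte arrays of different
// schemes from being confused, while the generic aliases supply
// Encode/Decode, Debug, Display, serde and SS58 support.
use sp_core::crypto::{PublicBytes, SignatureBytes};

#[doc(hidden)]
pub struct ExampleTag;

pub type Public = PublicBytes<32, ExampleTag>;
pub type Signature = SignatureBytes<64, ExampleTag>;

// Note: the shared Debug/Display/serde impls are gated on `Self: CryptoType`,
// so they only become available once the scheme wires up its key pair, e.g.
//
//   impl CryptoType for Public { type Pair = Pair; }
//   impl CryptoType for Signature { type Pair = Pair; }
```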
-#[cfg(feature = "serde")] -use crate::crypto::Ss58Codec; use crate::crypto::{ - CryptoType, Derive, DeriveError, DeriveJunction, Pair as TraitPair, Public as TraitPublic, - PublicBytes, SecretStringError, SignatureBytes, UncheckedFrom, + CryptoType, DeriveError, DeriveJunction, Pair as TraitPair, PublicBytes, SecretStringError, + SignatureBytes, UncheckedFrom, }; -#[cfg(feature = "serde")] -use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; -#[cfg(all(not(feature = "std"), feature = "serde"))] -use sp_std::alloc::{format, string::String}; use sp_std::vec::Vec; use w3f_bls::{ @@ -115,68 +109,6 @@ pub struct BlsTag; /// A public key. pub type Public = PublicBytes; -impl From> for Public { - fn from(x: Pair) -> Self { - x.public() - } -} - -#[cfg(feature = "std")] -impl std::str::FromStr for Public { - type Err = crate::crypto::PublicError; - - fn from_str(s: &str) -> Result { - Self::from_ss58check(s) - } -} - -#[cfg(feature = "std")] -impl std::fmt::Display for Public { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.to_ss58check()) - } -} - -#[cfg(feature = "std")] -impl sp_std::fmt::Debug for Public { - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - let s = self.to_ss58check(); - write!(f, "{} ({}...)", crate::hexdisplay::HexDisplay::from(&self.0), &s[0..8]) - } -} - -#[cfg(not(feature = "std"))] -impl sp_std::fmt::Debug for Public { - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } -} - -#[cfg(feature = "serde")] -impl Serialize for Public { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&self.to_ss58check()) - } -} - -#[cfg(feature = "serde")] -impl<'de, T: BlsBound> Deserialize<'de> for Public { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - Public::from_ss58check(&String::deserialize(deserializer)?) - .map_err(|e| de::Error::custom(format!("{:?}", e))) - } -} - -impl TraitPublic for Public {} - -impl Derive for Public {} - impl CryptoType for Public { type Pair = Pair; } @@ -184,41 +116,6 @@ impl CryptoType for Public { /// A generic BLS signature. pub type Signature = SignatureBytes; -#[cfg(feature = "serde")] -impl Serialize for Signature { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&array_bytes::bytes2hex("", self)) - } -} - -#[cfg(feature = "serde")] -impl<'de, T> Deserialize<'de> for Signature { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let signature_hex = array_bytes::hex2bytes(&String::deserialize(deserializer)?) 
- .map_err(|e| de::Error::custom(format!("{:?}", e)))?; - Signature::try_from(signature_hex.as_ref()) - .map_err(|e| de::Error::custom(format!("{:?}", e))) - } -} - -impl sp_std::fmt::Debug for Signature { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - write!(f, "{}", crate::hexdisplay::HexDisplay::from(&self.0)) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } -} - impl CryptoType for Signature { type Pair = Pair; } @@ -333,8 +230,10 @@ impl CryptoType for Pair { // Test set exercising the BLS12-377 implementation #[cfg(test)] -mod test { +mod tests { use super::*; + #[cfg(feature = "serde")] + use crate::crypto::Ss58Codec; use crate::crypto::DEV_PHRASE; use bls377::{Pair, Signature}; diff --git a/substrate/primitives/core/src/crypto.rs b/substrate/primitives/core/src/crypto.rs index a2bdc6ed58e..b13899fff51 100644 --- a/substrate/primitives/core/src/crypto.rs +++ b/substrate/primitives/core/src/crypto.rs @@ -485,8 +485,11 @@ pub trait ByteArray: AsRef<[u8]> + AsMut<[u8]> + for<'a> TryFrom<&'a [u8], Error } } -/// Trait suitable for typical cryptographic key public type. -pub trait Public: CryptoType + ByteArray + Derive + PartialEq + Eq + Clone + Send + Sync {} +/// Trait suitable for cryptographic public keys. +pub trait Public: CryptoType + ByteArray + PartialEq + Eq + Clone + Send + Sync + Derive {} + +/// Trait suitable for cryptographic signatures. +pub trait Signature: CryptoType + ByteArray + PartialEq + Eq + Clone + Send + Sync {} /// An opaque 32-byte cryptographic identifier. #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, MaxEncodedLen, TypeInfo)] @@ -648,32 +651,11 @@ pub use self::dummy::*; mod dummy { use super::*; - /// Dummy cryptography. Doesn't do anything. - #[derive(Clone, Hash, Default, Eq, PartialEq)] - pub struct Dummy; - - impl AsRef<[u8]> for Dummy { - fn as_ref(&self) -> &[u8] { - &b""[..] - } - } + #[doc(hidden)] + pub struct DummyTag; - impl AsMut<[u8]> for Dummy { - fn as_mut(&mut self) -> &mut [u8] { - unsafe { - #[allow(mutable_transmutes)] - sp_std::mem::transmute::<_, &'static mut [u8]>(&b""[..]) - } - } - } - - impl<'a> TryFrom<&'a [u8]> for Dummy { - type Error = (); - - fn try_from(_: &'a [u8]) -> Result { - Ok(Self) - } - } + /// Dummy cryptography. Doesn't do anything. + pub type Dummy = CryptoBytes<0, DummyTag>; impl CryptoType for Dummy { type Pair = Dummy; @@ -681,21 +663,10 @@ mod dummy { impl Derive for Dummy {} - impl ByteArray for Dummy { - const LEN: usize = 0; - fn from_slice(_: &[u8]) -> Result { - Ok(Self) - } - #[cfg(feature = "std")] - fn to_raw_vec(&self) -> Vec { - vec![] - } - fn as_slice(&self) -> &[u8] { - b"" - } - } impl Public for Dummy {} + impl Signature for Dummy {} + impl Pair for Dummy { type Public = Dummy; type Seed = Dummy; @@ -716,15 +687,15 @@ mod dummy { _: Iter, _: Option, ) -> Result<(Self, Option), DeriveError> { - Ok((Self, None)) + Ok((Self::default(), None)) } fn from_seed_slice(_: &[u8]) -> Result { - Ok(Self) + Ok(Self::default()) } fn sign(&self, _: &[u8]) -> Self::Signature { - Self + Self::default() } fn verify>(_: &Self::Signature, _: M, _: &Self::Public) -> bool { @@ -732,11 +703,11 @@ mod dummy { } fn public(&self) -> Self::Public { - Self + Self::default() } fn to_raw_vec(&self) -> Vec { - vec![] + Default::default() } } } @@ -845,7 +816,7 @@ pub trait Pair: CryptoType + Sized { /// The type used to represent a signature. 
Can be created from a key pair and a message /// and verified with the message and a public key. - type Signature: AsRef<[u8]>; + type Signature: Signature; /// Generate new secure (random) key pair. /// @@ -1218,6 +1189,8 @@ mod tests { use super::*; use crate::DeriveJunction; + struct TestCryptoTag; + #[derive(Clone, Eq, PartialEq, Debug)] enum TestPair { Generated, @@ -1226,59 +1199,33 @@ mod tests { Standard { phrase: String, password: Option, path: Vec }, Seed(Vec), } + impl Default for TestPair { fn default() -> Self { TestPair::Generated } } + impl CryptoType for TestPair { type Pair = Self; } - #[derive(Clone, PartialEq, Eq, Hash, Default)] - struct TestPublic; - impl AsRef<[u8]> for TestPublic { - fn as_ref(&self) -> &[u8] { - &[] - } - } - impl AsMut<[u8]> for TestPublic { - fn as_mut(&mut self) -> &mut [u8] { - &mut [] - } - } - impl<'a> TryFrom<&'a [u8]> for TestPublic { - type Error = (); + type TestPublic = PublicBytes<0, TestCryptoTag>; - fn try_from(data: &'a [u8]) -> Result { - Self::from_slice(data) - } - } impl CryptoType for TestPublic { type Pair = TestPair; } - impl Derive for TestPublic {} - impl ByteArray for TestPublic { - const LEN: usize = 0; - fn from_slice(bytes: &[u8]) -> Result { - if bytes.is_empty() { - Ok(Self) - } else { - Err(()) - } - } - fn as_slice(&self) -> &[u8] { - &[] - } - fn to_raw_vec(&self) -> Vec { - vec![] - } + + type TestSignature = SignatureBytes<0, TestCryptoTag>; + + impl CryptoType for TestSignature { + type Pair = TestPair; } - impl Public for TestPublic {} + impl Pair for TestPair { type Public = TestPublic; type Seed = [u8; 8]; - type Signature = [u8; 0]; + type Signature = TestSignature; fn generate() -> (Self, ::Seed) { (TestPair::Generated, [0u8; 8]) @@ -1327,7 +1274,7 @@ mod tests { } fn sign(&self, _message: &[u8]) -> Self::Signature { - [] + TestSignature::default() } fn verify>(_: &Self::Signature, _: M, _: &Self::Public) -> bool { @@ -1335,7 +1282,7 @@ mod tests { } fn public(&self) -> Self::Public { - TestPublic + TestPublic::default() } fn from_seed_slice(seed: &[u8]) -> Result { diff --git a/substrate/primitives/core/src/crypto_bytes.rs b/substrate/primitives/core/src/crypto_bytes.rs index 069878e1654..ee5f3482f74 100644 --- a/substrate/primitives/core/src/crypto_bytes.rs +++ b/substrate/primitives/core/src/crypto_bytes.rs @@ -18,15 +18,27 @@ //! Generic byte array which can be specialized with a marker type. use crate::{ - crypto::{FromEntropy, UncheckedFrom}, + crypto::{CryptoType, Derive, FromEntropy, Public, Signature, UncheckedFrom}, hash::{H256, H512}, }; use codec::{Decode, Encode, MaxEncodedLen}; use core::marker::PhantomData; use scale_info::TypeInfo; + use sp_runtime_interface::pass_by::{self, PassBy, PassByInner}; +#[cfg(feature = "serde")] +use crate::crypto::Ss58Codec; +#[cfg(feature = "serde")] +use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; + +#[cfg(all(not(feature = "std"), feature = "serde"))] +use sp_std::alloc::{format, string::String}; + +pub use public_bytes::*; +pub use signature_bytes::*; + /// Generic byte array holding some crypto-related raw data. /// /// The type is generic over a constant length `N` and a "tag" `T` which @@ -231,14 +243,137 @@ impl CryptoBytes<64, T> { } } -/// Tag used for generic public key bytes. -pub struct PublicTag; +mod public_bytes { + use super::*; + + /// Tag used for generic public key bytes. + pub struct PublicTag; + + /// Generic encoded public key. 
+ pub type PublicBytes = CryptoBytes; + + impl Derive for PublicBytes where Self: CryptoType {} + + impl Public for PublicBytes where Self: CryptoType {} + + impl sp_std::fmt::Debug for PublicBytes + where + Self: CryptoType, + { + #[cfg(feature = "std")] + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let s = self.to_ss58check(); + write!(f, "{} ({}...)", crate::hexdisplay::HexDisplay::from(&self.as_ref()), &s[0..8]) + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } + } + + #[cfg(feature = "std")] + impl std::fmt::Display for PublicBytes + where + Self: CryptoType, + { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", self.to_ss58check()) + } + } + + #[cfg(feature = "std")] + impl std::str::FromStr for PublicBytes + where + Self: CryptoType, + { + type Err = crate::crypto::PublicError; + + fn from_str(s: &str) -> Result { + Self::from_ss58check(s) + } + } + + #[cfg(feature = "serde")] + impl Serialize for PublicBytes + where + Self: CryptoType, + { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&self.to_ss58check()) + } + } + + #[cfg(feature = "serde")] + impl<'de, const N: usize, SubTag> Deserialize<'de> for PublicBytes + where + Self: CryptoType, + { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + Self::from_ss58check(&String::deserialize(deserializer)?) + .map_err(|e| de::Error::custom(format!("{:?}", e))) + } + } +} + +mod signature_bytes { + use super::*; + + /// Tag used for generic signature bytes. + pub struct SignatureTag; + + /// Generic encoded signature. + pub type SignatureBytes = CryptoBytes; -/// Generic encoded public key. -pub type PublicBytes = CryptoBytes; + impl Signature for SignatureBytes where Self: CryptoType {} -/// Tag used for generic signature bytes. -pub struct SignatureTag; + #[cfg(feature = "serde")] + impl Serialize for SignatureBytes + where + Self: CryptoType, + { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&array_bytes::bytes2hex("", self)) + } + } + + #[cfg(feature = "serde")] + impl<'de, const N: usize, SubTag> Deserialize<'de> for SignatureBytes + where + Self: CryptoType, + { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let signature_hex = array_bytes::hex2bytes(&String::deserialize(deserializer)?) + .map_err(|e| de::Error::custom(format!("{:?}", e)))?; + Self::try_from(signature_hex.as_ref()) + .map_err(|e| de::Error::custom(format!("{:?}", e))) + } + } -/// Generic encoded signature. -pub type SignatureBytes = CryptoBytes; + impl sp_std::fmt::Debug for SignatureBytes + where + Self: CryptoType, + { + #[cfg(feature = "std")] + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "{}", crate::hexdisplay::HexDisplay::from(&&self.0[..])) + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } + } +} diff --git a/substrate/primitives/core/src/ecdsa.rs b/substrate/primitives/core/src/ecdsa.rs index 0e6b06a34dc..9cba8cc3d35 100644 --- a/substrate/primitives/core/src/ecdsa.rs +++ b/substrate/primitives/core/src/ecdsa.rs @@ -17,11 +17,9 @@ //! Simple ECDSA secp256k1 API. 
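To see what the consolidated impls in `crypto_bytes.rs` above provide, a small serde round-trip using the existing sr25519 types may help; a sketch assuming the `std`/`serde` features, since SS58 and serde support are feature-gated:

```rust
use sp_core::{sr25519, Pair};

fn main() {
    let pair = sr25519::Pair::from_string("//Alice", None).expect("static values are valid");
    let public = pair.public();
    let signature = pair.sign(b"hello");

    // SS58 string via the shared `Serialize` impl on `PublicBytes`.
    let public_json = serde_json::to_string(&public).unwrap();
    // Hex string via the shared `Serialize` impl on `SignatureBytes`.
    let signature_json = serde_json::to_string(&signature).unwrap();

    // Both round-trip through the matching `Deserialize` impls.
    assert_eq!(public, serde_json::from_str(&public_json).unwrap());
    assert_eq!(signature, serde_json::from_str(&signature_json).unwrap());
}
```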
-#[cfg(feature = "serde")] -use crate::crypto::Ss58Codec; use crate::crypto::{ - CryptoType, CryptoTypeId, Derive, DeriveError, DeriveJunction, Pair as TraitPair, - Public as TraitPublic, PublicBytes, SecretStringError, SignatureBytes, + CryptoType, CryptoTypeId, DeriveError, DeriveJunction, Pair as TraitPair, PublicBytes, + SecretStringError, SignatureBytes, }; #[cfg(not(feature = "std"))] @@ -31,10 +29,6 @@ use secp256k1::{ ecdsa::{RecoverableSignature, RecoveryId}, Message, PublicKey, SecretKey, SECP256K1, }; -#[cfg(feature = "serde")] -use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; -#[cfg(all(not(feature = "std"), feature = "serde"))] -use sp_std::alloc::{format, string::String}; #[cfg(not(feature = "std"))] use sp_std::vec::Vec; @@ -80,10 +74,6 @@ impl Public { } } -impl TraitPublic for Public {} - -impl Derive for Public {} - #[cfg(feature = "std")] impl From for Public { fn from(pubkey: PublicKey) -> Self { @@ -106,85 +96,9 @@ impl From for Public { } } -#[cfg(feature = "std")] -impl std::fmt::Display for Public { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.to_ss58check()) - } -} - -impl sp_std::fmt::Debug for Public { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - let s = self.to_ss58check(); - write!(f, "{} ({}...)", crate::hexdisplay::HexDisplay::from(&self.as_ref()), &s[0..8]) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } -} - -#[cfg(feature = "serde")] -impl Serialize for Public { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&self.to_ss58check()) - } -} - -#[cfg(feature = "serde")] -impl<'de> Deserialize<'de> for Public { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - Public::from_ss58check(&String::deserialize(deserializer)?) - .map_err(|e| de::Error::custom(format!("{:?}", e))) - } -} - /// A signature (a 512-bit value, plus 8 bits for recovery ID). pub type Signature = SignatureBytes; -#[cfg(feature = "serde")] -impl Serialize for Signature { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&array_bytes::bytes2hex("", self)) - } -} - -#[cfg(feature = "serde")] -impl<'de> Deserialize<'de> for Signature { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let signature_hex = array_bytes::hex2bytes(&String::deserialize(deserializer)?) - .map_err(|e| de::Error::custom(format!("{:?}", e)))?; - Signature::try_from(signature_hex.as_ref()) - .map_err(|e| de::Error::custom(format!("{:?}", e))) - } -} - -impl sp_std::fmt::Debug for Signature { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - write!(f, "{}", crate::hexdisplay::HexDisplay::from(&self.0)) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } -} - impl Signature { /// Recover the public key from this signature and a message. 
pub fn recover>(&self, message: M) -> Option { @@ -417,7 +331,7 @@ mod test { use super::*; use crate::crypto::{ set_default_ss58_version, PublicError, Ss58AddressFormat, Ss58AddressFormatRegistry, - DEV_PHRASE, + Ss58Codec, DEV_PHRASE, }; use serde_json; diff --git a/substrate/primitives/core/src/ed25519.rs b/substrate/primitives/core/src/ed25519.rs index 0dda7b95972..a9494f2860b 100644 --- a/substrate/primitives/core/src/ed25519.rs +++ b/substrate/primitives/core/src/ed25519.rs @@ -17,18 +17,13 @@ //! Simple Ed25519 API. -#[cfg(feature = "serde")] -use crate::crypto::Ss58Codec; use crate::crypto::{ - ByteArray, CryptoType, CryptoTypeId, Derive, DeriveError, DeriveJunction, Pair as TraitPair, - Public as TraitPublic, PublicBytes, SecretStringError, SignatureBytes, + ByteArray, CryptoType, CryptoTypeId, DeriveError, DeriveJunction, Pair as TraitPair, + PublicBytes, SecretStringError, SignatureBytes, }; use ed25519_zebra::{SigningKey, VerificationKey}; -#[cfg(feature = "serde")] -use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; -#[cfg(all(not(feature = "std"), feature = "serde"))] -use sp_std::alloc::{format, string::String}; + use sp_std::vec::Vec; /// An identifier used to match public keys against ed25519 keys @@ -51,105 +46,9 @@ pub struct Ed25519Tag; /// A public key. pub type Public = PublicBytes; -impl TraitPublic for Public {} - -impl Derive for Public {} - -#[cfg(feature = "full_crypto")] -impl From for Public { - fn from(x: Pair) -> Self { - x.public() - } -} - -#[cfg(feature = "std")] -impl std::str::FromStr for Public { - type Err = crate::crypto::PublicError; - - fn from_str(s: &str) -> Result { - Self::from_ss58check(s) - } -} - -#[cfg(feature = "std")] -impl std::fmt::Display for Public { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.to_ss58check()) - } -} - -impl sp_std::fmt::Debug for Public { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - let s = self.to_ss58check(); - write!(f, "{} ({}...)", crate::hexdisplay::HexDisplay::from(&self.0), &s[0..8]) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } -} - -#[cfg(feature = "serde")] -impl Serialize for Public { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&self.to_ss58check()) - } -} - -#[cfg(feature = "serde")] -impl<'de> Deserialize<'de> for Public { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - Public::from_ss58check(&String::deserialize(deserializer)?) - .map_err(|e| de::Error::custom(format!("{:?}", e))) - } -} - /// A signature. pub type Signature = SignatureBytes; -#[cfg(feature = "serde")] -impl Serialize for Signature { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&array_bytes::bytes2hex("", self)) - } -} - -#[cfg(feature = "serde")] -impl<'de> Deserialize<'de> for Signature { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let signature_hex = array_bytes::hex2bytes(&String::deserialize(deserializer)?) 
- .map_err(|e| de::Error::custom(format!("{:?}", e)))?; - Signature::try_from(signature_hex.as_ref()) - .map_err(|e| de::Error::custom(format!("{:?}", e))) - } -} - -impl sp_std::fmt::Debug for Signature { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - write!(f, "{}", crate::hexdisplay::HexDisplay::from(&self.0)) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } -} - /// A key pair. #[derive(Copy, Clone)] pub struct Pair { @@ -253,8 +152,10 @@ impl CryptoType for Pair { } #[cfg(test)] -mod test { +mod tests { use super::*; + #[cfg(feature = "serde")] + use crate::crypto::Ss58Codec; use crate::crypto::DEV_PHRASE; use serde_json; diff --git a/substrate/primitives/core/src/lib.rs b/substrate/primitives/core/src/lib.rs index cf803c6fb49..098bd135bfe 100644 --- a/substrate/primitives/core/src/lib.rs +++ b/substrate/primitives/core/src/lib.rs @@ -56,27 +56,27 @@ pub mod const_hex2array; pub mod crypto; pub mod hexdisplay; pub use paste; - mod address_uri; -#[cfg(feature = "bandersnatch-experimental")] -pub mod bandersnatch; -#[cfg(feature = "bls-experimental")] -pub mod bls; -pub mod crypto_bytes; pub mod defer; -pub mod ecdsa; -pub mod ed25519; pub mod hash; #[cfg(feature = "std")] mod hasher; pub mod offchain; -pub mod paired_crypto; -pub mod sr25519; pub mod testing; #[cfg(feature = "std")] pub mod traits; pub mod uint; +#[cfg(feature = "bandersnatch-experimental")] +pub mod bandersnatch; +#[cfg(feature = "bls-experimental")] +pub mod bls; +pub mod crypto_bytes; +pub mod ecdsa; +pub mod ed25519; +pub mod paired_crypto; +pub mod sr25519; + #[cfg(feature = "bls-experimental")] pub use bls::{bls377, bls381}; #[cfg(feature = "bls-experimental")] diff --git a/substrate/primitives/core/src/paired_crypto.rs b/substrate/primitives/core/src/paired_crypto.rs index 27a7ab28dfd..3901846b375 100644 --- a/substrate/primitives/core/src/paired_crypto.rs +++ b/substrate/primitives/core/src/paired_crypto.rs @@ -19,20 +19,13 @@ use core::marker::PhantomData; -#[cfg(feature = "serde")] -use crate::crypto::Ss58Codec; use crate::crypto::{ - ByteArray, CryptoType, Derive, DeriveError, DeriveJunction, Pair as PairT, Public as PublicT, - PublicBytes, SecretStringError, SignatureBytes, UncheckedFrom, + ByteArray, CryptoType, DeriveError, DeriveJunction, Pair as PairT, Public as PublicT, + PublicBytes, SecretStringError, Signature as SignatureT, SignatureBytes, UncheckedFrom, }; use sp_std::vec::Vec; -#[cfg(feature = "serde")] -use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; -#[cfg(all(not(feature = "std"), feature = "serde"))] -use sp_std::alloc::{format, string::String}; - /// ECDSA and BLS12-377 paired crypto scheme #[cfg(feature = "bls-experimental")] pub mod ecdsa_bls377 { @@ -173,130 +166,10 @@ where } } -#[cfg(feature = "std")] -impl std::fmt::Display - for Public -where - Public: CryptoType, -{ - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.to_ss58check()) - } -} - -impl sp_std::fmt::Debug - for Public -where - Public: CryptoType, - [u8; LEFT_PLUS_RIGHT_LEN]: crate::hexdisplay::AsBytesRef, -{ - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - let s = self.to_ss58check(); - write!(f, "{} ({}...)", crate::hexdisplay::HexDisplay::from(&self.0), &s[0..8]) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } -} - 
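The deletions above (and the serde ones that follow) are again pure boilerplate removal; the paired scheme's behaviour is unchanged. For context, a minimal usage sketch of the (ECDSA, BLS12-377) pair, assuming the `bls-experimental` feature is enabled:

```rust
use sp_core::{paired_crypto::ecdsa_bls377, Pair};

fn main() {
    // Generate a fresh paired key; both halves come from the same entropy.
    let (pair, _seed) = ecdsa_bls377::Pair::generate();
    let message = b"paired signature test";

    // `sign` produces the concatenated (ECDSA, BLS) signature bytes and
    // `verify` checks both halves against the paired public key.
    let signature = pair.sign(message);
    assert!(ecdsa_bls377::Pair::verify(&signature, message, &pair.public()));
}
```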
-#[cfg(feature = "serde")] -impl Serialize - for Public -where - Public: CryptoType, -{ - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&self.to_ss58check()) - } -} - -#[cfg(feature = "serde")] -impl<'de, const LEFT_PLUS_RIGHT_LEN: usize, SubTag: PairedCryptoSubTagBound> Deserialize<'de> - for Public -where - Public: CryptoType, -{ - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - Public::from_ss58check(&String::deserialize(deserializer)?) - .map_err(|e| de::Error::custom(format!("{:?}", e))) - } -} - -impl PublicT - for Public -where - Public: CryptoType, -{ -} - -impl Derive - for Public -{ -} - -/// Trait characterizing a signature which could be used as individual component of an -/// `paired_crypto:Signature` pair. -pub trait SignatureBound: ByteArray {} - -impl SignatureBound for T {} - /// A pair of signatures of different types pub type Signature = SignatureBytes; -#[cfg(feature = "serde")] -impl Serialize - for Signature -{ - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&array_bytes::bytes2hex("", self)) - } -} - -#[cfg(feature = "serde")] -impl<'de, const LEFT_PLUS_RIGHT_LEN: usize, SubTag> Deserialize<'de> - for Signature -{ - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let bytes = array_bytes::hex2bytes(&String::deserialize(deserializer)?) - .map_err(|e| de::Error::custom(format!("{:?}", e)))?; - Signature::::try_from(bytes.as_ref()).map_err(|e| { - de::Error::custom(format!("Error converting deserialized data into signature: {:?}", e)) - }) - } -} - -impl sp_std::fmt::Debug - for Signature -where - [u8; LEFT_PLUS_RIGHT_LEN]: crate::hexdisplay::AsBytesRef, -{ - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - write!(f, "{}", crate::hexdisplay::HexDisplay::from(&self.0)) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } -} - /// A key pair. 
pub struct Pair< LeftPair: PairT, @@ -332,9 +205,8 @@ impl< > PairT for Pair where Pair: CryptoType, - LeftPair::Signature: SignatureBound, - RightPair::Signature: SignatureBound, - Public: CryptoType, + Public: PublicT, + Signature: SignatureT, LeftPair::Seed: From + Into, RightPair::Seed: From + Into, { @@ -417,14 +289,14 @@ where // Test set exercising the (ECDSA,BLS12-377) implementation #[cfg(all(test, feature = "bls-experimental"))] -mod test { +mod tests { use super::*; - use crate::{crypto::DEV_PHRASE, KeccakHasher}; + #[cfg(feature = "serde")] + use crate::crypto::Ss58Codec; + use crate::{bls377, crypto::DEV_PHRASE, ecdsa, KeccakHasher}; use codec::{Decode, Encode}; use ecdsa_bls377::{Pair, Signature}; - use crate::{bls377, ecdsa}; - #[test] fn test_length_of_paired_ecdsa_and_bls377_public_key_and_signature_is_correct() { assert_eq!( diff --git a/substrate/primitives/core/src/sr25519.rs b/substrate/primitives/core/src/sr25519.rs index ee0546bc534..54b9a98db3d 100644 --- a/substrate/primitives/core/src/sr25519.rs +++ b/substrate/primitives/core/src/sr25519.rs @@ -22,7 +22,9 @@ #[cfg(feature = "serde")] use crate::crypto::Ss58Codec; -use crate::crypto::{DeriveError, DeriveJunction, Pair as TraitPair, SecretStringError}; +use crate::crypto::{ + CryptoBytes, DeriveError, DeriveJunction, Pair as TraitPair, SecretStringError, +}; #[cfg(feature = "full_crypto")] use schnorrkel::signing_context; use schnorrkel::{ @@ -31,9 +33,7 @@ use schnorrkel::{ }; use sp_std::vec::Vec; -use crate::crypto::{ - CryptoType, CryptoTypeId, Derive, Public as TraitPublic, PublicBytes, SignatureBytes, -}; +use crate::crypto::{CryptoType, CryptoTypeId, Derive, Public as TraitPublic, SignatureBytes}; use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; @@ -59,20 +59,28 @@ pub const SIGNATURE_SERIALIZED_SIZE: usize = 64; #[doc(hidden)] pub struct Sr25519Tag; +#[doc(hidden)] +pub struct Sr25519PublicTag; /// An Schnorrkel/Ristretto x25519 ("sr25519") public key. -pub type Public = PublicBytes; +pub type Public = CryptoBytes; -/// An Schnorrkel/Ristretto x25519 ("sr25519") key pair. -pub struct Pair(Keypair); +impl TraitPublic for Public {} -impl Clone for Pair { - fn clone(&self) -> Self { - Pair(schnorrkel::Keypair { - public: self.0.public, - secret: schnorrkel::SecretKey::from_bytes(&self.0.secret.to_bytes()[..]) - .expect("key is always the correct size; qed"), - }) +impl Derive for Public { + /// Derive a child key from a series of given junctions. + /// + /// `None` if there are any hard junctions in there. + #[cfg(feature = "serde")] + fn derive>(&self, path: Iter) -> Option { + let mut acc = PublicKey::from_bytes(self.as_ref()).ok()?; + for j in path { + match j { + DeriveJunction::Soft(cc) => acc = acc.derived_key_simple(ChainCode(cc), &[]).0, + DeriveJunction::Hard(_cc) => return None, + } + } + Some(Self::from(acc.to_bytes())) } } @@ -129,29 +137,6 @@ impl<'de> Deserialize<'de> for Public { /// An Schnorrkel/Ristretto x25519 ("sr25519") signature. pub type Signature = SignatureBytes; -#[cfg(feature = "serde")] -impl Serialize for Signature { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&array_bytes::bytes2hex("", self)) - } -} - -#[cfg(feature = "serde")] -impl<'de> Deserialize<'de> for Signature { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let signature_hex = array_bytes::hex2bytes(&String::deserialize(deserializer)?) 
- .map_err(|e| de::Error::custom(format!("{:?}", e)))?; - Signature::try_from(signature_hex.as_ref()) - .map_err(|e| de::Error::custom(format!("{:?}", e))) - } -} - #[cfg(feature = "full_crypto")] impl From for Signature { fn from(s: schnorrkel::Signature) -> Signature { @@ -159,37 +144,19 @@ impl From for Signature { } } -impl sp_std::fmt::Debug for Signature { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - write!(f, "{}", crate::hexdisplay::HexDisplay::from(&self.0)) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } -} +/// An Schnorrkel/Ristretto x25519 ("sr25519") key pair. +pub struct Pair(Keypair); -impl Derive for Public { - /// Derive a child key from a series of given junctions. - /// - /// `None` if there are any hard junctions in there. - #[cfg(feature = "serde")] - fn derive>(&self, path: Iter) -> Option { - let mut acc = PublicKey::from_bytes(self.as_ref()).ok()?; - for j in path { - match j { - DeriveJunction::Soft(cc) => acc = acc.derived_key_simple(ChainCode(cc), &[]).0, - DeriveJunction::Hard(_cc) => return None, - } - } - Some(Self::from(acc.to_bytes())) +impl Clone for Pair { + fn clone(&self) -> Self { + Pair(schnorrkel::Keypair { + public: self.0.public, + secret: schnorrkel::SecretKey::from_bytes(&self.0.secret.to_bytes()[..]) + .expect("key is always the correct size; qed"), + }) } } -impl TraitPublic for Public {} - #[cfg(feature = "std")] impl From for Pair { fn from(sec: MiniSecretKey) -> Pair { diff --git a/substrate/primitives/statement-store/src/ecies.rs b/substrate/primitives/statement-store/src/ecies.rs index 80a040fd4c8..6fa16658e00 100644 --- a/substrate/primitives/statement-store/src/ecies.rs +++ b/substrate/primitives/statement-store/src/ecies.rs @@ -148,7 +148,7 @@ mod test { #[test] fn basic_ed25519_encryption() { let (pair, _) = sp_core::ed25519::Pair::generate(); - let pk = pair.into(); + let pk = pair.public(); let plain_message = b"An important secret message"; let encrypted = encrypt_ed25519(&pk, plain_message).unwrap(); -- GitLab From cc1e6ac301ea88e3cb3253a84e4c6aa28f2d8f87 Mon Sep 17 00:00:00 2001 From: Andrei Eres Date: Mon, 25 Mar 2024 16:57:46 +0100 Subject: [PATCH 028/128] [subsystem-benchmarks] Fix availability-write regression tests (#3698) Adds availability-write regression tests. The results for the `availability-distribution` subsystem are volatile, so I had to reduce the precision of the test. 
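Since the rework below replaces the old warm-up helper with explicit repetition and per-metric tolerance checks, it may help to spell out the arithmetic those checks perform. A self-contained sketch, not the actual `BenchmarkUsage` code:

```rust
// An observed value passes if it is within `precision` (a ratio) of the
// baseline; network counters get a near-zero tolerance, CPU time gets 5%.
fn check_within(
    resource: &str,
    baseline: f64,
    precision: f64,
    observed: f64,
) -> Result<(), String> {
    let diff = (observed - baseline).abs() / baseline;
    if diff <= precision {
        Ok(())
    } else {
        Err(format!(
            "{resource}: observed {observed:.3} deviates {:.1}% from baseline {baseline:.3} (allowed {:.1}%)",
            diff * 100.0,
            precision * 100.0
        ))
    }
}

fn main() {
    // Received/sent bytes are deterministic, so 0.1% only absorbs float
    // error; CPU usage is volatile and gets the looser 5% bound.
    assert!(check_within("Sent to peers", 18480.0, 0.001, 18480.0).is_ok());
    assert!(check_within("availability-store", 0.153, 0.05, 0.160).is_ok());
    assert!(check_within("availability-store", 0.153, 0.05, 0.200).is_err());
}
```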
--- .gitlab/pipeline/test.yml | 1 + .../availability-distribution/Cargo.toml | 4 +- ...ilability-distribution-regression-bench.rs | 59 +-- .../availability-recovery-regression-bench.rs | 53 ++- polkadot/node/subsystem-bench/Cargo.toml | 2 +- .../src/cli/subsystem-bench.rs | 14 +- .../src/lib/availability/mod.rs | 396 ++++-------------- .../src/lib/availability/test_state.rs | 268 ++++++++++++ .../subsystem-bench/src/lib/environment.rs | 1 + polkadot/node/subsystem-bench/src/lib/lib.rs | 1 - .../subsystem-bench/src/lib/mock/av_store.rs | 1 + .../src/lib/mock/runtime_api.rs | 2 + .../node/subsystem-bench/src/lib/usage.rs | 15 +- .../node/subsystem-bench/src/lib/utils.rs | 76 ---- 14 files changed, 435 insertions(+), 458 deletions(-) rename polkadot/node/network/availability-distribution/{tests => benches}/availability-distribution-regression-bench.rs (57%) create mode 100644 polkadot/node/subsystem-bench/src/lib/availability/test_state.rs delete mode 100644 polkadot/node/subsystem-bench/src/lib/utils.rs diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index d244316000a..476ac6333f5 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -503,6 +503,7 @@ subsystem-regression-tests: - .run-immediately script: - cargo bench --profile=testnet -p polkadot-availability-recovery --bench availability-recovery-regression-bench --features subsystem-benchmarks + - cargo bench --profile=testnet -p polkadot-availability-distribution --bench availability-distribution-regression-bench --features subsystem-benchmarks tags: - benchmark allow_failure: true diff --git a/polkadot/node/network/availability-distribution/Cargo.toml b/polkadot/node/network/availability-distribution/Cargo.toml index 182d92cb163..ac606bd377f 100644 --- a/polkadot/node/network/availability-distribution/Cargo.toml +++ b/polkadot/node/network/availability-distribution/Cargo.toml @@ -39,9 +39,9 @@ polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } polkadot-subsystem-bench = { path = "../../subsystem-bench" } -[[test]] +[[bench]] name = "availability-distribution-regression-bench" -path = "tests/availability-distribution-regression-bench.rs" +path = "benches/availability-distribution-regression-bench.rs" harness = false required-features = ["subsystem-benchmarks"] diff --git a/polkadot/node/network/availability-distribution/tests/availability-distribution-regression-bench.rs b/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs similarity index 57% rename from polkadot/node/network/availability-distribution/tests/availability-distribution-regression-bench.rs rename to polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs index bdab11298d5..019eb122208 100644 --- a/polkadot/node/network/availability-distribution/tests/availability-distribution-regression-bench.rs +++ b/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs @@ -24,46 +24,55 @@ //! 
- availability-store use polkadot_subsystem_bench::{ - availability::{benchmark_availability_write, prepare_test, TestDataAvailability, TestState}, + availability::{benchmark_availability_write, prepare_test, TestState}, configuration::TestConfiguration, - utils::{warm_up_and_benchmark, WarmUpOptions}, + usage::BenchmarkUsage, }; +use std::io::Write; + +const BENCH_COUNT: usize = 50; fn main() -> Result<(), String> { let mut messages = vec![]; let mut config = TestConfiguration::default(); - // A single node effort roughly n_cores * needed_approvals / n_validators = 60 * 30 / 300 - config.n_cores = 6; + // A single node effort roughly + config.n_cores = 10; + config.n_validators = 500; config.num_blocks = 3; config.generate_pov_sizes(); + let state = TestState::new(&config); - let usage = warm_up_and_benchmark( - WarmUpOptions::new(&[ - "availability-distribution", - "bitfield-distribution", - "availability-store", - ]), - || { - let mut state = TestState::new(&config); - let (mut env, _protocol_config) = - prepare_test(config.clone(), &mut state, TestDataAvailability::Write, false); + println!("Benchmarking..."); + let usages: Vec = (0..BENCH_COUNT) + .map(|n| { + print!("\r[{}{}]", "#".repeat(n), "_".repeat(BENCH_COUNT - n)); + std::io::stdout().flush().unwrap(); + let (mut env, _cfgs) = prepare_test( + &state, + polkadot_subsystem_bench::availability::TestDataAvailability::Write, + false, + ); env.runtime().block_on(benchmark_availability_write( "data_availability_write", &mut env, - state, + &state, )) - }, - )?; - println!("{}", usage); + }) + .collect(); + println!("\rDone!{}", " ".repeat(BENCH_COUNT)); + let average_usage = BenchmarkUsage::average(&usages); + println!("{}", average_usage); - messages.extend(usage.check_network_usage(&[ - ("Received from peers", 443.333, 0.05), - ("Sent to peers", 21818.555, 0.05), + // We expect no variance for received and sent + // but use 0.001 because we operate with floats + messages.extend(average_usage.check_network_usage(&[ + ("Received from peers", 433.3, 0.001), + ("Sent to peers", 18480.0, 0.001), ])); - messages.extend(usage.check_cpu_usage(&[ - ("availability-distribution", 0.011, 0.05), - ("bitfield-distribution", 0.029, 0.05), - ("availability-store", 0.232, 0.05), + messages.extend(average_usage.check_cpu_usage(&[ + ("availability-distribution", 0.012, 0.05), + ("availability-store", 0.153, 0.05), + ("bitfield-distribution", 0.026, 0.05), ])); if messages.is_empty() { diff --git a/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs b/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs index 42b1787e045..5e8b81be82d 100644 --- a/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs +++ b/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs @@ -14,9 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! availability-write regression tests +//! availability-read regression tests //! -//! Availability write benchmark based on Kusama parameters and scale. +//! Availability read benchmark based on Kusama parameters and scale. //! //! Subsystems involved: //! 
- availability-recovery @@ -27,8 +27,11 @@ use polkadot_subsystem_bench::{ TestDataAvailability, TestState, }, configuration::TestConfiguration, - utils::{warm_up_and_benchmark, WarmUpOptions}, + usage::BenchmarkUsage, }; +use std::io::Write; + +const BENCH_COUNT: usize = 50; fn main() -> Result<(), String> { let mut messages = vec![]; @@ -38,27 +41,33 @@ fn main() -> Result<(), String> { config.num_blocks = 3; config.generate_pov_sizes(); - let usage = warm_up_and_benchmark(WarmUpOptions::new(&["availability-recovery"]), || { - let mut state = TestState::new(&config); - let (mut env, _protocol_config) = prepare_test( - config.clone(), - &mut state, - TestDataAvailability::Read(options.clone()), - false, - ); - env.runtime().block_on(benchmark_availability_read( - "data_availability_read", - &mut env, - state, - )) - })?; - println!("{}", usage); + let state = TestState::new(&config); + + println!("Benchmarking..."); + let usages: Vec = (0..BENCH_COUNT) + .map(|n| { + print!("\r[{}{}]", "#".repeat(n), "_".repeat(BENCH_COUNT - n)); + std::io::stdout().flush().unwrap(); + let (mut env, _cfgs) = + prepare_test(&state, TestDataAvailability::Read(options.clone()), false); + env.runtime().block_on(benchmark_availability_read( + "data_availability_read", + &mut env, + &state, + )) + }) + .collect(); + println!("\rDone!{}", " ".repeat(BENCH_COUNT)); + let average_usage = BenchmarkUsage::average(&usages); + println!("{}", average_usage); - messages.extend(usage.check_network_usage(&[ - ("Received from peers", 307200.000, 0.05), - ("Sent to peers", 1.667, 0.05), + // We expect no variance for received and sent + // but use 0.001 because we operate with floats + messages.extend(average_usage.check_network_usage(&[ + ("Received from peers", 307200.000, 0.001), + ("Sent to peers", 1.667, 0.001), ])); - messages.extend(usage.check_cpu_usage(&[("availability-recovery", 11.500, 0.05)])); + messages.extend(average_usage.check_cpu_usage(&[("availability-recovery", 11.500, 0.05)])); if messages.is_empty() { Ok(()) diff --git a/polkadot/node/subsystem-bench/Cargo.toml b/polkadot/node/subsystem-bench/Cargo.toml index 71711ad0fbd..2570fe9cfa2 100644 --- a/polkadot/node/subsystem-bench/Cargo.toml +++ b/polkadot/node/subsystem-bench/Cargo.toml @@ -56,7 +56,7 @@ bitvec = "1.0.1" kvdb-memorydb = "0.13.0" parity-scale-codec = { version = "3.6.1", features = ["derive", "std"] } -tokio = "1.24.2" +tokio = { version = "1.24.2", features = ["rt-multi-thread", "parking_lot"] } clap-num = "1.0.2" polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } sp-keyring = { path = "../../../substrate/primitives/keyring" } diff --git a/polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs b/polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs index deb351360d7..10953b6c783 100644 --- a/polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs +++ b/polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs @@ -136,31 +136,29 @@ impl BenchCli { let usage = match objective { TestObjective::DataAvailabilityRead(opts) => { - let mut state = availability::TestState::new(&test_config); + let state = availability::TestState::new(&test_config); let (mut env, _protocol_config) = availability::prepare_test( - test_config, - &mut state, + &state, availability::TestDataAvailability::Read(opts), true, ); env.runtime().block_on(availability::benchmark_availability_read( &benchmark_name, &mut env, - state, + &state, )) }, TestObjective::DataAvailabilityWrite => { - let mut state = 
availability::TestState::new(&test_config); + let state = availability::TestState::new(&test_config); let (mut env, _protocol_config) = availability::prepare_test( - test_config, - &mut state, + &state, availability::TestDataAvailability::Write, true, ); env.runtime().block_on(availability::benchmark_availability_write( &benchmark_name, &mut env, - state, + &state, )) }, TestObjective::ApprovalVoting(ref options) => { diff --git a/polkadot/node/subsystem-bench/src/lib/availability/mod.rs b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs index dc4e1e40310..765afdd5912 100644 --- a/polkadot/node/subsystem-bench/src/lib/availability/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs @@ -15,11 +15,11 @@ // along with Polkadot. If not, see . use crate::{ - configuration::TestConfiguration, + availability::av_store_helpers::new_av_store, dummy_builder, environment::{TestEnvironment, TestEnvironmentDependencies, GENESIS_HASH}, mock::{ - av_store::{self, MockAvailabilityStore}, + av_store::{self, MockAvailabilityStore, NetworkAvailabilityState}, chain_api::{ChainApiState, MockChainApi}, network_bridge::{self, MockNetworkBridgeRx, MockNetworkBridgeTx}, runtime_api::{self, MockRuntimeApi}, @@ -28,12 +28,8 @@ use crate::{ network::new_network, usage::BenchmarkUsage, }; -use av_store::NetworkAvailabilityState; -use av_store_helpers::new_av_store; -use bitvec::bitvec; use colored::Colorize; use futures::{channel::oneshot, stream::FuturesUnordered, StreamExt}; -use itertools::Itertools; use parity_scale_codec::Encode; use polkadot_availability_bitfield_distribution::BitfieldDistribution; use polkadot_availability_distribution::{ @@ -43,37 +39,27 @@ use polkadot_availability_recovery::AvailabilityRecoverySubsystem; use polkadot_node_core_av_store::AvailabilityStoreSubsystem; use polkadot_node_metrics::metrics::Metrics; use polkadot_node_network_protocol::{ - request_response::{v1::ChunkFetchingRequest, IncomingRequest, ReqProtocolNames}, - OurView, Versioned, VersionedValidationProtocol, + request_response::{IncomingRequest, ReqProtocolNames}, + OurView, }; -use polkadot_node_primitives::{AvailableData, BlockData, ErasureChunk, PoV}; use polkadot_node_subsystem::{ messages::{AllMessages, AvailabilityRecoveryMessage}, Overseer, OverseerConnector, SpawnGlue, }; -use polkadot_node_subsystem_test_helpers::{ - derive_erasure_chunks_with_proofs_and_root, mock::new_block_import_info, -}; use polkadot_node_subsystem_types::{ messages::{AvailabilityStoreMessage, NetworkBridgeEvent}, Span, }; use polkadot_overseer::{metrics::Metrics as OverseerMetrics, Handle as OverseerHandle}; -use polkadot_primitives::{ - AvailabilityBitfield, BlockNumber, CandidateHash, CandidateReceipt, GroupIndex, Hash, HeadData, - Header, PersistedValidationData, Signed, SigningContext, ValidatorIndex, -}; -use polkadot_primitives_test_helpers::{dummy_candidate_receipt, dummy_hash}; -use sc_network::{ - request_responses::{IncomingRequest as RawIncomingRequest, ProtocolConfig}, - PeerId, -}; +use polkadot_primitives::GroupIndex; +use sc_network::request_responses::{IncomingRequest as RawIncomingRequest, ProtocolConfig}; use sc_service::SpawnTaskHandle; use serde::{Deserialize, Serialize}; -use sp_core::H256; -use std::{collections::HashMap, iter::Cycle, ops::Sub, sync::Arc, time::Instant}; +use std::{ops::Sub, sync::Arc, time::Instant}; +pub use test_state::TestState; mod av_store_helpers; +mod test_state; const LOG_TARGET: &str = "subsystem-bench::availability"; @@ -149,94 +135,48 @@ fn 
build_overseer_for_availability_write( (overseer, OverseerHandle::new(raw_handle)) } -/// Takes a test configuration and uses it to create the `TestEnvironment`. pub fn prepare_test( - config: TestConfiguration, - state: &mut TestState, - mode: TestDataAvailability, - with_prometheus_endpoint: bool, -) -> (TestEnvironment, Vec) { - prepare_test_inner( - config, - state, - mode, - TestEnvironmentDependencies::default(), - with_prometheus_endpoint, - ) -} - -fn prepare_test_inner( - config: TestConfiguration, - state: &mut TestState, + state: &TestState, mode: TestDataAvailability, - dependencies: TestEnvironmentDependencies, with_prometheus_endpoint: bool, ) -> (TestEnvironment, Vec) { - // Generate test authorities. - let test_authorities = config.generate_authorities(); - - let mut candidate_hashes: HashMap> = HashMap::new(); - - // Prepare per block candidates. - // Genesis block is always finalized, so we start at 1. - for block_num in 1..=config.num_blocks { - for _ in 0..config.n_cores { - candidate_hashes - .entry(Hash::repeat_byte(block_num as u8)) - .or_default() - .push(state.next_candidate().expect("Cycle iterator")) - } - - // First candidate is our backed candidate. - state.backed_candidates.push( - candidate_hashes - .get(&Hash::repeat_byte(block_num as u8)) - .expect("just inserted above") - .first() - .expect("just inserted above") - .clone(), - ); - } - - let runtime_api = runtime_api::MockRuntimeApi::new( - config.clone(), - test_authorities.clone(), - candidate_hashes, - Default::default(), - Default::default(), - 0, - ); - - let availability_state = NetworkAvailabilityState { - candidate_hashes: state.candidate_hashes.clone(), - available_data: state.available_data.clone(), - chunks: state.chunks.clone(), - }; - - let mut req_cfgs = Vec::new(); - let (collation_req_receiver, collation_req_cfg) = IncomingRequest::get_config_receiver(&ReqProtocolNames::new(GENESIS_HASH, None)); - req_cfgs.push(collation_req_cfg); - let (pov_req_receiver, pov_req_cfg) = IncomingRequest::get_config_receiver(&ReqProtocolNames::new(GENESIS_HASH, None)); - let (chunk_req_receiver, chunk_req_cfg) = IncomingRequest::get_config_receiver(&ReqProtocolNames::new(GENESIS_HASH, None)); - req_cfgs.push(pov_req_cfg); + let req_cfgs = vec![collation_req_cfg, pov_req_cfg]; - let (network, network_interface, network_receiver) = - new_network(&config, &dependencies, &test_authorities, vec![Arc::new(availability_state)]); + let dependencies = TestEnvironmentDependencies::default(); + let availability_state = NetworkAvailabilityState { + candidate_hashes: state.candidate_hashes.clone(), + available_data: state.available_data.clone(), + chunks: state.chunks.clone(), + }; + let (network, network_interface, network_receiver) = new_network( + &state.config, + &dependencies, + &state.test_authorities, + vec![Arc::new(availability_state.clone())], + ); let network_bridge_tx = network_bridge::MockNetworkBridgeTx::new( network.clone(), network_interface.subsystem_sender(), - test_authorities.clone(), + state.test_authorities.clone(), ); - let network_bridge_rx = - network_bridge::MockNetworkBridgeRx::new(network_receiver, Some(chunk_req_cfg.clone())); + network_bridge::MockNetworkBridgeRx::new(network_receiver, Some(chunk_req_cfg)); + + let runtime_api = runtime_api::MockRuntimeApi::new( + state.config.clone(), + state.test_authorities.clone(), + state.candidate_receipts.clone(), + Default::default(), + Default::default(), + 0, + ); let (overseer, overseer_handle) = match &mode { TestDataAvailability::Read(options) => 
{ @@ -271,27 +211,12 @@ fn prepare_test_inner( }, TestDataAvailability::Write => { let availability_distribution = AvailabilityDistributionSubsystem::new( - test_authorities.keyring.keystore(), + state.test_authorities.keyring.keystore(), IncomingRequestReceivers { pov_req_receiver, chunk_req_receiver }, Metrics::try_register(&dependencies.registry).unwrap(), ); - let block_headers = (1..=config.num_blocks) - .map(|block_number| { - ( - Hash::repeat_byte(block_number as u8), - Header { - digest: Default::default(), - number: block_number as BlockNumber, - parent_hash: Default::default(), - extrinsics_root: Default::default(), - state_root: Default::default(), - }, - ) - }) - .collect::>(); - - let chain_api_state = ChainApiState { block_headers }; + let chain_api_state = ChainApiState { block_headers: state.block_headers.clone() }; let chain_api = MockChainApi::new(chain_api_state); let bitfield_distribution = BitfieldDistribution::new(Metrics::try_register(&dependencies.registry).unwrap()); @@ -311,167 +236,42 @@ fn prepare_test_inner( ( TestEnvironment::new( dependencies, - config, + state.config.clone(), network, overseer, overseer_handle, - test_authorities, + state.test_authorities.clone(), with_prometheus_endpoint, ), req_cfgs, ) } -#[derive(Clone)] -pub struct TestState { - // Full test configuration - config: TestConfiguration, - // A cycle iterator on all PoV sizes used in the test. - pov_sizes: Cycle>, - // Generated candidate receipts to be used in the test - candidates: Cycle>, - // Map from pov size to candidate index - pov_size_to_candidate: HashMap, - // Map from generated candidate hashes to candidate index in `available_data` - // and `chunks`. - candidate_hashes: HashMap, - // Per candidate index receipts. - candidate_receipt_templates: Vec, - // Per candidate index `AvailableData` - available_data: Vec, - // Per candiadte index chunks - chunks: Vec>, - // Per relay chain block - candidate backed by our backing group - backed_candidates: Vec, -} - -impl TestState { - pub fn next_candidate(&mut self) -> Option { - let candidate = self.candidates.next(); - let candidate_hash = candidate.as_ref().unwrap().hash(); - gum::trace!(target: LOG_TARGET, "Next candidate selected {:?}", candidate_hash); - candidate - } - - /// Generate candidates to be used in the test. - fn generate_candidates(&mut self) { - let count = self.config.n_cores * self.config.num_blocks; - gum::info!(target: LOG_TARGET,"{}", format!("Pre-generating {} candidates.", count).bright_blue()); - - // Generate all candidates - self.candidates = (0..count) - .map(|index| { - let pov_size = self.pov_sizes.next().expect("This is a cycle; qed"); - let candidate_index = *self - .pov_size_to_candidate - .get(&pov_size) - .expect("pov_size always exists; qed"); - let mut candidate_receipt = - self.candidate_receipt_templates[candidate_index].clone(); - - // Make it unique. 
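This block is being moved largely verbatim into the new `test_state.rs`, and the `relay_parent` trick it relies on is worth calling out: a handful of per-PoV-size templates are fanned out into many unique candidates. A stripped-down sketch with illustrative types:

```rust
use std::collections::HashMap;

#[derive(Clone, Hash, PartialEq, Eq, Debug)]
struct Receipt {
    relay_parent: u64, // varied per candidate
    pov_size: usize,   // shared with the template
}

// Fan the templates out into `count` unique candidates and remember which
// template (and therefore which chunks/available data) each maps back to.
fn generate(templates: &[Receipt], count: usize) -> HashMap<Receipt, usize> {
    let mut candidate_to_template = HashMap::new();
    for index in 0..count {
        let template_index = index % templates.len();
        let mut receipt = templates[template_index].clone();
        // Varying the relay parent makes every clone hash differently even
        // though the erasure-coded payload is shared with its template.
        receipt.relay_parent = index as u64;
        candidate_to_template.insert(receipt, template_index);
    }
    candidate_to_template
}

fn main() {
    let templates = vec![Receipt { relay_parent: 0, pov_size: 1024 }];
    let map = generate(&templates, 10);
    // All ten candidates are distinct keys despite sharing one template.
    assert_eq!(map.len(), 10);
}
```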
- candidate_receipt.descriptor.relay_parent = Hash::from_low_u64_be(index as u64); - // Store the new candidate in the state - self.candidate_hashes.insert(candidate_receipt.hash(), candidate_index); - - gum::debug!(target: LOG_TARGET, candidate_hash = ?candidate_receipt.hash(), "new candidate"); - - candidate_receipt - }) - .collect::>() - .into_iter() - .cycle(); - } - - pub fn new(config: &TestConfiguration) -> Self { - let config = config.clone(); - - let mut chunks = Vec::new(); - let mut available_data = Vec::new(); - let mut candidate_receipt_templates = Vec::new(); - let mut pov_size_to_candidate = HashMap::new(); - - // we use it for all candidates. - let persisted_validation_data = PersistedValidationData { - parent_head: HeadData(vec![7, 8, 9]), - relay_parent_number: Default::default(), - max_pov_size: 1024, - relay_parent_storage_root: Default::default(), - }; - - // For each unique pov we create a candidate receipt. - for (index, pov_size) in config.pov_sizes().iter().cloned().unique().enumerate() { - gum::info!(target: LOG_TARGET, index, pov_size, "{}", "Generating template candidate".bright_blue()); - - let mut candidate_receipt = dummy_candidate_receipt(dummy_hash()); - let pov = PoV { block_data: BlockData(vec![index as u8; pov_size]) }; - - let new_available_data = AvailableData { - validation_data: persisted_validation_data.clone(), - pov: Arc::new(pov), - }; - - let (new_chunks, erasure_root) = derive_erasure_chunks_with_proofs_and_root( - config.n_validators, - &new_available_data, - |_, _| {}, - ); - - candidate_receipt.descriptor.erasure_root = erasure_root; - - chunks.push(new_chunks); - available_data.push(new_available_data); - pov_size_to_candidate.insert(pov_size, index); - candidate_receipt_templates.push(candidate_receipt); - } - - gum::info!(target: LOG_TARGET, "{}","Created test environment.".bright_blue()); - - let mut _self = Self { - available_data, - candidate_receipt_templates, - chunks, - pov_size_to_candidate, - pov_sizes: Vec::from(config.pov_sizes()).into_iter().cycle(), - candidate_hashes: HashMap::new(), - candidates: Vec::new().into_iter().cycle(), - backed_candidates: Vec::new(), - config, - }; - - _self.generate_candidates(); - _self - } - - pub fn backed_candidates(&mut self) -> &mut Vec { - &mut self.backed_candidates - } -} - pub async fn benchmark_availability_read( benchmark_name: &str, env: &mut TestEnvironment, - mut state: TestState, + state: &TestState, ) -> BenchmarkUsage { let config = env.config().clone(); - env.import_block(new_block_import_info(Hash::repeat_byte(1), 1)).await; - - let test_start = Instant::now(); - let mut batch = FuturesUnordered::new(); - let mut availability_bytes = 0u128; - env.metrics().set_n_validators(config.n_validators); env.metrics().set_n_cores(config.n_cores); - for block_num in 1..=env.config().num_blocks { + let mut batch = FuturesUnordered::new(); + let mut availability_bytes = 0u128; + let mut candidates = state.candidates.clone(); + let test_start = Instant::now(); + for block_info in state.block_infos.iter() { + let block_num = block_info.number as usize; gum::info!(target: LOG_TARGET, "Current block {}/{}", block_num, env.config().num_blocks); env.metrics().set_current_block(block_num); let block_start_ts = Instant::now(); + env.import_block(block_info.clone()).await; + for candidate_num in 0..config.n_cores as u64 { let candidate = - state.next_candidate().expect("We always send up to n_cores*num_blocks; qed"); + candidates.next().expect("We always send up to n_cores*num_blocks; qed"); let 
(tx, rx) = oneshot::channel(); batch.push(rx); @@ -519,7 +319,7 @@ pub async fn benchmark_availability_read( pub async fn benchmark_availability_write( benchmark_name: &str, env: &mut TestEnvironment, - mut state: TestState, + state: &TestState, ) -> BenchmarkUsage { let config = env.config().clone(); @@ -527,7 +327,7 @@ pub async fn benchmark_availability_write( env.metrics().set_n_cores(config.n_cores); gum::info!(target: LOG_TARGET, "Seeding availability store with candidates ..."); - for backed_candidate in state.backed_candidates().clone() { + for backed_candidate in state.backed_candidates.clone() { let candidate_index = *state.candidate_hashes.get(&backed_candidate.hash()).unwrap(); let available_data = state.available_data[candidate_index].clone(); let (tx, rx) = oneshot::channel(); @@ -550,15 +350,14 @@ pub async fn benchmark_availability_write( gum::info!(target: LOG_TARGET, "Done"); let test_start = Instant::now(); - - for block_num in 1..=env.config().num_blocks { + for block_info in state.block_infos.iter() { + let block_num = block_info.number as usize; gum::info!(target: LOG_TARGET, "Current block #{}", block_num); env.metrics().set_current_block(block_num); let block_start_ts = Instant::now(); - let relay_block_hash = Hash::repeat_byte(block_num as u8); - env.import_block(new_block_import_info(relay_block_hash, block_num as BlockNumber)) - .await; + let relay_block_hash = block_info.hash; + env.import_block(block_info.clone()).await; // Inform bitfield distribution about our view of current test block let message = polkadot_node_subsystem_types::messages::BitfieldDistributionMessage::NetworkBridgeUpdate( @@ -569,20 +368,13 @@ pub async fn benchmark_availability_write( let chunk_fetch_start_ts = Instant::now(); // Request chunks of our own backed candidate from all other validators. - let mut receivers = Vec::new(); - for index in 1..config.n_validators { + let payloads = state.chunk_fetching_requests.get(block_num - 1).expect("pregenerated"); + let receivers = (1..config.n_validators).filter_map(|index| { let (pending_response, pending_response_receiver) = oneshot::channel(); - let request = RawIncomingRequest { - peer: PeerId::random(), - payload: ChunkFetchingRequest { - candidate_hash: state.backed_candidates()[block_num - 1].hash(), - index: ValidatorIndex(index as u32), - } - .encode(), - pending_response, - }; - + let peer_id = *env.authorities().peer_ids.get(index).expect("all validators have ids"); + let payload = payloads.get(index).expect("pregenerated").clone(); + let request = RawIncomingRequest { peer: peer_id, payload, pending_response }; let peer = env .authorities() .validator_authority_id @@ -592,59 +384,39 @@ pub async fn benchmark_availability_write( if env.network().is_peer_connected(peer) && env.network().send_request_from_peer(peer, request).is_ok() { - receivers.push(pending_response_receiver); + Some(pending_response_receiver) + } else { + None } - } + }); gum::info!(target: LOG_TARGET, "Waiting for all emulated peers to receive their chunk from us ..."); - for receiver in receivers.into_iter() { - let response = receiver.await.expect("Chunk is always served successfully"); - // TODO: check if chunk is the one the peer expects to receive. - assert!(response.result.is_ok()); - } - let chunk_fetch_duration = Instant::now().sub(chunk_fetch_start_ts).as_millis(); + let responses = futures::future::try_join_all(receivers) + .await + .expect("Chunk is always served successfully"); + // TODO: check if chunk is the one the peer expects to receive. 
+ assert!(responses.iter().all(|v| v.result.is_ok())); + let chunk_fetch_duration = Instant::now().sub(chunk_fetch_start_ts).as_millis(); gum::info!(target: LOG_TARGET, "All chunks received in {}ms", chunk_fetch_duration); - let signing_context = SigningContext { session_index: 0, parent_hash: relay_block_hash }; let network = env.network().clone(); let authorities = env.authorities().clone(); - let n_validators = config.n_validators; // Spawn a task that will generate `n_validator` - 1 signed bitfiends and // send them from the emulated peers to the subsystem. // TODO: Implement topology. - env.spawn_blocking("send-bitfields", async move { - for index in 1..n_validators { - let validator_public = - authorities.validator_public.get(index).expect("All validator keys are known"); - - // Node has all the chunks in the world. - let payload: AvailabilityBitfield = - AvailabilityBitfield(bitvec![u8, bitvec::order::Lsb0; 1u8; 32]); - // TODO(soon): Use pre-signed messages. This is quite intensive on the CPU. - let signed_bitfield = Signed::::sign( - &authorities.keyring.keystore(), - payload, - &signing_context, - ValidatorIndex(index as u32), - validator_public, - ) - .ok() - .flatten() - .expect("should be signed"); - - let from_peer = &authorities.validator_authority_id[index]; - - let message = peer_bitfield_message_v2(relay_block_hash, signed_bitfield); + let messages = state.signed_bitfields.get(&relay_block_hash).expect("pregenerated").clone(); + for index in 1..config.n_validators { + let from_peer = &authorities.validator_authority_id[index]; + let message = messages.get(index).expect("pregenerated").clone(); - // Send the action from peer only if it is connected to our node. - if network.is_peer_connected(from_peer) { - let _ = network.send_message_from_peer(from_peer, message); - } + // Send the action from peer only if it is connected to our node. + if network.is_peer_connected(from_peer) { + let _ = network.send_message_from_peer(from_peer, message); } - }); + } gum::info!( "Waiting for {} bitfields to be received and processed", @@ -679,17 +451,3 @@ pub async fn benchmark_availability_write( &["availability-distribution", "bitfield-distribution", "availability-store"], ) } - -pub fn peer_bitfield_message_v2( - relay_hash: H256, - signed_bitfield: Signed, -) -> VersionedValidationProtocol { - let bitfield = polkadot_node_network_protocol::v2::BitfieldDistributionMessage::Bitfield( - relay_hash, - signed_bitfield.into(), - ); - - Versioned::V2(polkadot_node_network_protocol::v2::ValidationProtocol::BitfieldDistribution( - bitfield, - )) -} diff --git a/polkadot/node/subsystem-bench/src/lib/availability/test_state.rs b/polkadot/node/subsystem-bench/src/lib/availability/test_state.rs new file mode 100644 index 00000000000..c328ffedf91 --- /dev/null +++ b/polkadot/node/subsystem-bench/src/lib/availability/test_state.rs @@ -0,0 +1,268 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+use crate::configuration::{TestAuthorities, TestConfiguration};
+use bitvec::bitvec;
+use colored::Colorize;
+use itertools::Itertools;
+use parity_scale_codec::Encode;
+use polkadot_node_network_protocol::{
+	request_response::v1::ChunkFetchingRequest, Versioned, VersionedValidationProtocol,
+};
+use polkadot_node_primitives::{AvailableData, BlockData, ErasureChunk, PoV};
+use polkadot_node_subsystem_test_helpers::{
+	derive_erasure_chunks_with_proofs_and_root, mock::new_block_import_info,
+};
+use polkadot_overseer::BlockInfo;
+use polkadot_primitives::{
+	AvailabilityBitfield, BlockNumber, CandidateHash, CandidateReceipt, Hash, HeadData, Header,
+	PersistedValidationData, Signed, SigningContext, ValidatorIndex,
+};
+use polkadot_primitives_test_helpers::{dummy_candidate_receipt, dummy_hash};
+use sp_core::H256;
+use std::{collections::HashMap, iter::Cycle, sync::Arc};
+
+const LOG_TARGET: &str = "subsystem-bench::availability::test_state";
+
+#[derive(Clone)]
+pub struct TestState {
+	// Full test configuration
+	pub config: TestConfiguration,
+	// A cycle iterator on all PoV sizes used in the test.
+	pub pov_sizes: Cycle<std::vec::IntoIter<usize>>,
+	// Generated candidate receipts to be used in the test
+	pub candidates: Cycle<std::vec::IntoIter<CandidateReceipt>>,
+	// Map from pov size to candidate index
+	pub pov_size_to_candidate: HashMap<usize, usize>,
+	// Map from generated candidate hashes to candidate index in `available_data` and `chunks`.
+	pub candidate_hashes: HashMap<CandidateHash, usize>,
+	// Per candidate index receipts.
+	pub candidate_receipt_templates: Vec<CandidateReceipt>,
+	// Per candidate index `AvailableData`
+	pub available_data: Vec<AvailableData>,
+	// Per candidate index chunks
+	pub chunks: Vec<Vec<ErasureChunk>>,
+	// Per relay chain block - candidate backed by our backing group
+	pub backed_candidates: Vec<CandidateReceipt>,
+	// Relay chain block infos
+	pub block_infos: Vec<BlockInfo>,
+	// Chunk fetching requests for backed candidates
+	pub chunk_fetching_requests: Vec<Vec<Vec<u8>>>,
+	// Pregenerated signed availability bitfields
+	pub signed_bitfields: HashMap<H256, Vec<VersionedValidationProtocol>>,
+	// Relay chain block headers
+	pub block_headers: HashMap<H256, Header>,
+	// Authority keys for the network emulation.
+	pub test_authorities: TestAuthorities,
+	// Map from relay chain block hashes to the candidate receipts generated for that block
+	pub candidate_receipts: HashMap<H256, Vec<CandidateReceipt>>,
+}
+
+impl TestState {
+	pub fn new(config: &TestConfiguration) -> Self {
+		let mut test_state = Self {
+			available_data: Default::default(),
+			candidate_receipt_templates: Default::default(),
+			chunks: Default::default(),
+			pov_size_to_candidate: Default::default(),
+			pov_sizes: Vec::from(config.pov_sizes()).into_iter().cycle(),
+			candidate_hashes: HashMap::new(),
+			candidates: Vec::new().into_iter().cycle(),
+			backed_candidates: Vec::new(),
+			config: config.clone(),
+			block_infos: Default::default(),
+			chunk_fetching_requests: Default::default(),
+			signed_bitfields: Default::default(),
+			candidate_receipts: Default::default(),
+			block_headers: Default::default(),
+			test_authorities: config.generate_authorities(),
+		};
+
+		// we use it for all candidates.
+		let persisted_validation_data = PersistedValidationData {
+			parent_head: HeadData(vec![7, 8, 9]),
+			relay_parent_number: Default::default(),
+			max_pov_size: 1024,
+			relay_parent_storage_root: Default::default(),
+		};
+
+		// For each unique pov we create a candidate receipt.
+ for (index, pov_size) in config.pov_sizes().iter().cloned().unique().enumerate() { + gum::info!(target: LOG_TARGET, index, pov_size, "{}", "Generating template candidate".bright_blue()); + + let mut candidate_receipt = dummy_candidate_receipt(dummy_hash()); + let pov = PoV { block_data: BlockData(vec![index as u8; pov_size]) }; + + let new_available_data = AvailableData { + validation_data: persisted_validation_data.clone(), + pov: Arc::new(pov), + }; + + let (new_chunks, erasure_root) = derive_erasure_chunks_with_proofs_and_root( + config.n_validators, + &new_available_data, + |_, _| {}, + ); + + candidate_receipt.descriptor.erasure_root = erasure_root; + + test_state.chunks.push(new_chunks); + test_state.available_data.push(new_available_data); + test_state.pov_size_to_candidate.insert(pov_size, index); + test_state.candidate_receipt_templates.push(candidate_receipt); + } + + test_state.block_infos = (1..=config.num_blocks) + .map(|block_num| { + let relay_block_hash = Hash::repeat_byte(block_num as u8); + new_block_import_info(relay_block_hash, block_num as BlockNumber) + }) + .collect(); + + test_state.block_headers = test_state + .block_infos + .iter() + .map(|info| { + ( + info.hash, + Header { + digest: Default::default(), + number: info.number, + parent_hash: info.parent_hash, + extrinsics_root: Default::default(), + state_root: Default::default(), + }, + ) + }) + .collect::>(); + + // Generate all candidates + let candidates_count = config.n_cores * config.num_blocks; + gum::info!(target: LOG_TARGET,"{}", format!("Pre-generating {} candidates.", candidates_count).bright_blue()); + test_state.candidates = (0..candidates_count) + .map(|index| { + let pov_size = test_state.pov_sizes.next().expect("This is a cycle; qed"); + let candidate_index = *test_state + .pov_size_to_candidate + .get(&pov_size) + .expect("pov_size always exists; qed"); + let mut candidate_receipt = + test_state.candidate_receipt_templates[candidate_index].clone(); + + // Make it unique. + candidate_receipt.descriptor.relay_parent = Hash::from_low_u64_be(index as u64); + // Store the new candidate in the state + test_state.candidate_hashes.insert(candidate_receipt.hash(), candidate_index); + + gum::debug!(target: LOG_TARGET, candidate_hash = ?candidate_receipt.hash(), "new candidate"); + + candidate_receipt + }) + .collect::>() + .into_iter() + .cycle(); + + // Prepare per block candidates. + // Genesis block is always finalized, so we start at 1. + for info in test_state.block_infos.iter() { + for _ in 0..config.n_cores { + let receipt = test_state.candidates.next().expect("Cycle iterator"); + test_state.candidate_receipts.entry(info.hash).or_default().push(receipt); + } + + // First candidate is our backed candidate. 
+ test_state.backed_candidates.push( + test_state + .candidate_receipts + .get(&info.hash) + .expect("just inserted above") + .first() + .expect("just inserted above") + .clone(), + ); + } + + test_state.chunk_fetching_requests = test_state + .backed_candidates + .iter() + .map(|candidate| { + (0..config.n_validators) + .map(|index| { + ChunkFetchingRequest { + candidate_hash: candidate.hash(), + index: ValidatorIndex(index as u32), + } + .encode() + }) + .collect::>() + }) + .collect::>(); + + test_state.signed_bitfields = test_state + .block_infos + .iter() + .map(|block_info| { + let signing_context = + SigningContext { session_index: 0, parent_hash: block_info.hash }; + let messages = (0..config.n_validators) + .map(|index| { + let validator_public = test_state + .test_authorities + .validator_public + .get(index) + .expect("All validator keys are known"); + + // Node has all the chunks in the world. + let payload: AvailabilityBitfield = + AvailabilityBitfield(bitvec![u8, bitvec::order::Lsb0; 1u8; 32]); + let signed_bitfield = Signed::::sign( + &test_state.test_authorities.keyring.keystore(), + payload, + &signing_context, + ValidatorIndex(index as u32), + validator_public, + ) + .ok() + .flatten() + .expect("should be signed"); + + peer_bitfield_message_v2(block_info.hash, signed_bitfield) + }) + .collect::>(); + + (block_info.hash, messages) + }) + .collect(); + + gum::info!(target: LOG_TARGET, "{}","Created test environment.".bright_blue()); + + test_state + } +} + +fn peer_bitfield_message_v2( + relay_hash: H256, + signed_bitfield: Signed, +) -> VersionedValidationProtocol { + let bitfield = polkadot_node_network_protocol::v2::BitfieldDistributionMessage::Bitfield( + relay_hash, + signed_bitfield.into(), + ); + + Versioned::V2(polkadot_node_network_protocol::v2::ValidationProtocol::BitfieldDistribution( + bitfield, + )) +} diff --git a/polkadot/node/subsystem-bench/src/lib/environment.rs b/polkadot/node/subsystem-bench/src/lib/environment.rs index 958ed50d089..2d80d75a14a 100644 --- a/polkadot/node/subsystem-bench/src/lib/environment.rs +++ b/polkadot/node/subsystem-bench/src/lib/environment.rs @@ -118,6 +118,7 @@ fn new_runtime() -> tokio::runtime::Runtime { .thread_name("subsystem-bench") .enable_all() .thread_stack_size(3 * 1024 * 1024) + .worker_threads(4) .build() .unwrap() } diff --git a/polkadot/node/subsystem-bench/src/lib/lib.rs b/polkadot/node/subsystem-bench/src/lib/lib.rs index ef2724abc98..d06f2822a89 100644 --- a/polkadot/node/subsystem-bench/src/lib/lib.rs +++ b/polkadot/node/subsystem-bench/src/lib/lib.rs @@ -26,4 +26,3 @@ pub(crate) mod keyring; pub(crate) mod mock; pub(crate) mod network; pub mod usage; -pub mod utils; diff --git a/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs b/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs index 9064f17940f..080644da92a 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs @@ -41,6 +41,7 @@ const LOG_TARGET: &str = "subsystem-bench::av-store-mock"; /// Mockup helper. Contains Ccunks and full availability data of all parachain blocks /// used in a test. 
+#[derive(Clone)] pub struct NetworkAvailabilityState { pub candidate_hashes: HashMap, pub available_data: Vec, diff --git a/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs b/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs index 53faf562f03..3c39de870a2 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs @@ -36,6 +36,7 @@ use std::collections::HashMap; const LOG_TARGET: &str = "subsystem-bench::runtime-api-mock"; /// Minimal state to answer requests. +#[derive(Clone)] pub struct RuntimeApiState { // All authorities in the test, authorities: TestAuthorities, @@ -49,6 +50,7 @@ pub struct RuntimeApiState { } /// A mocked `runtime-api` subsystem. +#[derive(Clone)] pub struct MockRuntimeApi { state: RuntimeApiState, config: TestConfiguration, diff --git a/polkadot/node/subsystem-bench/src/lib/usage.rs b/polkadot/node/subsystem-bench/src/lib/usage.rs index ef60d67372a..7172969a8f9 100644 --- a/polkadot/node/subsystem-bench/src/lib/usage.rs +++ b/polkadot/node/subsystem-bench/src/lib/usage.rs @@ -17,6 +17,7 @@ //! Test usage implementation use colored::Colorize; +use itertools::Itertools; use serde::{Deserialize, Serialize}; use std::collections::HashMap; @@ -37,10 +38,16 @@ impl std::fmt::Display for BenchmarkUsage { self.network_usage .iter() .map(|v| v.to_string()) + .sorted() .collect::>() .join("\n"), format!("{:<32}{:>12}{:>12}", "CPU usage, seconds", "total", "per block").blue(), - self.cpu_usage.iter().map(|v| v.to_string()).collect::>().join("\n") + self.cpu_usage + .iter() + .map(|v| v.to_string()) + .sorted() + .collect::>() + .join("\n") ) } } @@ -101,8 +108,8 @@ fn check_resource_usage( None } else { Some(format!( - "The resource `{}` is expected to be equal to {} with a precision {}, but the current value is {}", - resource_name, base, precision, usage.per_block + "The resource `{}` is expected to be equal to {} with a precision {}, but the current value is {} ({})", + resource_name, base, precision, usage.per_block, diff )) } } else { @@ -119,7 +126,7 @@ pub struct ResourceUsage { impl std::fmt::Display for ResourceUsage { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{:<32}{:>12.3}{:>12.3}", self.resource_name.cyan(), self.total, self.per_block) + write!(f, "{:<32}{:>12.4}{:>12.4}", self.resource_name.cyan(), self.total, self.per_block) } } diff --git a/polkadot/node/subsystem-bench/src/lib/utils.rs b/polkadot/node/subsystem-bench/src/lib/utils.rs deleted file mode 100644 index 75b72cc11b9..00000000000 --- a/polkadot/node/subsystem-bench/src/lib/utils.rs +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Test utils - -use crate::usage::BenchmarkUsage; -use std::io::{stdout, Write}; - -pub struct WarmUpOptions<'a> { - /// The maximum number of runs considered for marming up. 
-	pub warm_up: usize,
-	/// The number of runs considered for benchmarking.
-	pub bench: usize,
-	/// The difference in CPU usage between runs considered as normal
-	pub precision: f64,
-	/// The subsystems whose CPU usage is checked during warm-up cycles
-	pub subsystems: &'a [&'a str],
-}
-
-impl<'a> WarmUpOptions<'a> {
-	pub fn new(subsystems: &'a [&'a str]) -> Self {
-		Self { warm_up: 100, bench: 3, precision: 0.02, subsystems }
-	}
-}
-
-pub fn warm_up_and_benchmark(
-	options: WarmUpOptions,
-	run: impl Fn() -> BenchmarkUsage,
-) -> Result<BenchmarkUsage, String> {
-	println!("Warming up...");
-	let mut usages = Vec::with_capacity(options.bench);
-
-	for n in 1..=options.warm_up {
-		let curr = run();
-		if let Some(prev) = usages.last() {
-			let diffs = options
-				.subsystems
-				.iter()
-				.map(|&v| {
-					curr.cpu_usage_diff(prev, v)
-						.ok_or(format!("{} not found in benchmark {:?}", v, prev))
-				})
-				.collect::<Result<Vec<f64>, String>>()?;
-			if !diffs.iter().all(|&v| v < options.precision) {
-				usages.clear();
-			}
-		}
-		usages.push(curr);
-		print!("\r{}%", n * 100 / options.warm_up);
-		if usages.len() == options.bench {
-			println!("\rTook {} runs to warm up", n.saturating_sub(options.bench));
-			break;
-		}
-		stdout().flush().unwrap();
-	}
-
-	if usages.len() != options.bench {
-		println!("Didn't warm up after {} runs", options.warm_up);
-		return Err("Can't warm up".to_string())
-	}
-
-	Ok(BenchmarkUsage::average(&usages))
-}
-- 
GitLab

From ea97863c56c573879564891fc83a9543f46bd95f Mon Sep 17 00:00:00 2001
From: Dcompoze
Date: Tue, 26 Mar 2024 12:45:53 +0000
Subject: [PATCH 029/128] Fix formatting in Cargo.toml (#3842)

Fixes formatting for https://github.com/paritytech/polkadot-sdk/pull/3698

---
 polkadot/node/subsystem-bench/Cargo.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/polkadot/node/subsystem-bench/Cargo.toml b/polkadot/node/subsystem-bench/Cargo.toml
index 2570fe9cfa2..05907e428f9 100644
--- a/polkadot/node/subsystem-bench/Cargo.toml
+++ b/polkadot/node/subsystem-bench/Cargo.toml
@@ -56,7 +56,7 @@ bitvec = "1.0.1"
 kvdb-memorydb = "0.13.0"
 parity-scale-codec = { version = "3.6.1", features = ["derive", "std"] }
 
-tokio = { version = "1.24.2", features = ["rt-multi-thread", "parking_lot"] }
+tokio = { version = "1.24.2", features = ["parking_lot", "rt-multi-thread"] }
 clap-num = "1.0.2"
 polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" }
 sp-keyring = { path = "../../../substrate/primitives/keyring" }
-- 
GitLab

From b839c995c0b889508f2f8ee6a1807f38c043e922 Mon Sep 17 00:00:00 2001
From: Serban Iorga
Date: Tue, 26 Mar 2024 14:09:11 +0100
Subject: [PATCH 030/128] Update bridges subtree (#3841)

Updating the bridges subtree, hopefully just one last time in this form,
in order to make the final migration less verbose.
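
For the record, a subtree bump of this kind boils down to a single squashed
pull from the upstream bridges repository. A minimal sketch of the command
involved, assuming the subtree lives under `bridges/` and tracks
parity-bridges-common (the remote URL, branch name and use of `--squash`
here are illustrative assumptions, not taken from this patch):

    # Hypothetical subtree update; the exact remote/branch may differ.
    git subtree pull --prefix=bridges \
        https://github.com/paritytech/parity-bridges-common.git master --squash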
--- bridges/bin/runtime-common/Cargo.toml | 2 +- .../chains/chain-asset-hub-rococo/Cargo.toml | 2 +- .../chains/chain-asset-hub-westend/Cargo.toml | 2 +- .../chains/chain-polkadot-bulletin/Cargo.toml | 2 +- .../docs/bridge-relayers-claim-rewards.png | Bin 0 -> 35621 bytes bridges/docs/bridge-relayers-deregister.png | Bin 0 -> 10115 bytes bridges/docs/bridge-relayers-register.png | Bin 0 -> 51026 bytes bridges/docs/running-relayer.md | 343 ++++++++++++++++++ bridges/modules/grandpa/Cargo.toml | 2 +- bridges/modules/messages/Cargo.toml | 2 +- bridges/modules/parachains/Cargo.toml | 2 +- bridges/modules/relayers/Cargo.toml | 2 +- .../modules/xcm-bridge-hub-router/Cargo.toml | 2 +- bridges/modules/xcm-bridge-hub/Cargo.toml | 2 +- bridges/primitives/header-chain/Cargo.toml | 2 +- bridges/primitives/messages/Cargo.toml | 2 +- bridges/primitives/parachains/Cargo.toml | 2 +- bridges/primitives/polkadot-core/Cargo.toml | 2 +- bridges/primitives/relayers/Cargo.toml | 2 +- bridges/primitives/runtime/Cargo.toml | 2 +- .../xcm-bridge-hub-router/Cargo.toml | 2 +- bridges/scripts/verify-pallets-build.sh | 3 + 22 files changed, 363 insertions(+), 17 deletions(-) create mode 100644 bridges/docs/bridge-relayers-claim-rewards.png create mode 100644 bridges/docs/bridge-relayers-deregister.png create mode 100644 bridges/docs/bridge-relayers-register.png create mode 100644 bridges/docs/running-relayer.md diff --git a/bridges/bin/runtime-common/Cargo.toml b/bridges/bin/runtime-common/Cargo.toml index fac88b20ca5..f00ba1c9734 100644 --- a/bridges/bin/runtime-common/Cargo.toml +++ b/bridges/bin/runtime-common/Cargo.toml @@ -14,7 +14,7 @@ workspace = true codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] } hash-db = { version = "0.16.0", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.0", default-features = false, features = ["derive"] } static_assertions = { version = "1.1", optional = true } # Bridge dependencies diff --git a/bridges/chains/chain-asset-hub-rococo/Cargo.toml b/bridges/chains/chain-asset-hub-rococo/Cargo.toml index 07c9b3b5289..55dc384badd 100644 --- a/bridges/chains/chain-asset-hub-rococo/Cargo.toml +++ b/bridges/chains/chain-asset-hub-rococo/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.0", default-features = false, features = ["derive"] } # Substrate Dependencies frame-support = { path = "../../../substrate/frame/support", default-features = false } diff --git a/bridges/chains/chain-asset-hub-westend/Cargo.toml b/bridges/chains/chain-asset-hub-westend/Cargo.toml index f75236ee1b3..1379b099a2a 100644 --- a/bridges/chains/chain-asset-hub-westend/Cargo.toml +++ b/bridges/chains/chain-asset-hub-westend/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.0", default-features = false, features = ["derive"] } # Substrate Dependencies frame-support = { path = "../../../substrate/frame/support", default-features = false } diff --git a/bridges/chains/chain-polkadot-bulletin/Cargo.toml 
b/bridges/chains/chain-polkadot-bulletin/Cargo.toml index d10c4043967..37e060d897c 100644 --- a/bridges/chains/chain-polkadot-bulletin/Cargo.toml +++ b/bridges/chains/chain-polkadot-bulletin/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.0", default-features = false, features = ["derive"] } # Bridge Dependencies diff --git a/bridges/docs/bridge-relayers-claim-rewards.png b/bridges/docs/bridge-relayers-claim-rewards.png new file mode 100644 index 0000000000000000000000000000000000000000..d56b8dd871e8445e7cab49517123b0092ce09137 GIT binary patch literal 35621 zcmb@tbzD^6+CB`TAd-U8ogzp|mjcqEfP}=*4MR7KG>Ej)r4l0D-JK)S-QC^HZ*jio zd5-7(=lSQ|A4F%|Ywxw!y5qX9``RII6{WBqkvu{|Lc)@fmQY4QLd`@%LdJcF3clgr z_1*%1-FFa^QGEyw&xc0A;4`t~8x2PlsEMPCzP&M$sSVWH_=SU^y|J;4gBjFu52aBA zoWyFbs^KVZZ>;ZV4z;0DHMcefUm_v>&K~roV&|h8^#5J{yM&69o%1CX2e$zGO96H+ zPqo{xNJvyjG7_&$Dn zWI4CLRG2cFT;{s@nwPNQIH@Ga+%MlSB!4BtS&ToVq<|lj9!N*`PAWuVyA2K2r^up;6KN zbwIpjWkdctr7KuPSytk&Lozv9`LFY7I2XTs!F&9lBkFN>#+Sb?5d4|XuY}Ou=P1Ej!?v~{($RhaYuOjiAtk>{@}>Ux8fY)T<-Y&k4W+iPDOu@ zxX}7DG$bx3hoNXn+q}f0o)S-@w!z~fsAkFTkV+JbqkVoFJcdtNqSW4r8ef zrNHK9;mPLuHVxJMCJ)Qs&jArZMn-mYbk;d>Zu}Y{oNG)yGDZ(z&CBDBwT0$5!l5V7ogznVFf!s*AXP-K9F3l$B}4zWO{4 zc0o8=2tia=3{6e-a>0dKRg{EJs_|r5<3b^b%dxui#n3cnijVnPm4~^In@^0XuD)Zs z9@brl;It==7yVEqGYgBwgHFn|LfFmLTKH0(rPF80)+uOXzv#`7=sOjaqlD4Ms4dq2 ztbx67%|f&?Ma0_o+1F|Bp-{H+^73SUhrodW#YY$z7duc`Z>q2dstzMFb13BYFm8Bc zMCS>7qSRE>MFXn&MIL81_V)H>R#t6BoE#pfRzegwZ{ECd_3$WlJupyKRz^Bs zqC`kLIBkd%w{>aG4cy4LU)mTj{iCoYj&7F7f#`9Q{AOS-Vx;CTIQMuZR|N6C^G0oE#xQ4^p0KKpKQ3IC5lctfafU02pXwVxp|2 zC8wc5Bp1g@&%@)a(Rl_YYGh>e_QQuyX=#*9OiV1=RgonnoVVwlqS5j3U6bXO6SXdE zg$oys0VUo#n6!NHQuW>m9B3CkuPThKFOfw6thc+taILD?Uj7 z_3M+%tXmNH_u$CL8)@l>p@eKE1IhexZKn%ClqC*pieQI>LqhaddcJC~5TtsYYRbyV zUDP&S32r+J!iA17fmK~^n~IVilNpJtf_&-*Q}tQ-K7JsLNRcuYdl zb9u6rlbhSR_9a-d?QnyB|M0I5+^0`xX2@glE*(_uj1Q5*B7F6$3+{co6nM4|Cz2ssDPP z%Z}+c27#sM)H;JlZJu$NQ;?SKN?%)V@<9QU29Mdf+#LfJsKE?IPEP(MHg*w=`%KKW zzP)WUlm(tf_4mw7e;^hqpT{W+?~Eh9N|rn|JNtfgyoZN}+d=^0cq2@7VPOG05DP(X z4{r9(RX`ug*M~-;R6X6Ww6?p01R{Cz*VEclYj1N z_N?k`)?))irO{;B&Iro2l-#n9PU# z%FWFU#`!`-Yy5+uo<+Z9%5mEFpx4i`GJ#*D?z~NIR*hsl@TIMM2aCMq}+1s(2w$ZK00IAjVCGW zYOk>oLH0E^_RrLmoRSibVQ08;$7kYc7~&?5{ID=1!~fT>=ZlMr=U0sg4+{5nYMbhn z*wSgp3@tlH!j->;=@(*m9&LRm18yS*N=pMZ>oKXdA5$ensh#x^LT(*>JF_Z+M z0o<0OdYmUt@`4D&+kvqh5IleWeD*6h7x~iT9x#W&nHdEoB_$YnZA}fUCgCNX=FZN} zjW~8kXQv@&!d)m55fNe0EXjbm3MBPAPb^)SqeMU*9bbYSpQ&{j`tzq{V<;OV19&^XQ#r>#O^qxn+NhuMBiHU$AYK$EQfkt_=!YYs zw>6q?WNcjQyfYcvld8m%Dw_J&a@adbaxB06gISf0F8Tr8JZ@j#O zLGYNF}kTFlCd1#vTn=+9C}(g5pNG%z?~0fnxCsDu*3 zsI;r(97fEw8f{-(Tznr5O+`ba3)nwQuUO+DI(qdlYQ{JJiF3EHVVlXy!otE|;^InO z_H==ZwjQqZ5{r5ZQBzZU9wp7S0qgcd!&y9VEt=W^cw}j5NgU(}Fysj#AwCgNSI<}G zYLFR3)mgy3SHlH&o2I6cT7z)TaK?CkgAM5c2<~oafiabpm0?!TKs}d8r=6#)%PK2X zR8=KZRECUWDB|!zx;<+|crnPwgE+;nudgq(--L1_ehBi{XAp!z_9YT@W?@z@prfMt z2*TFgRPP4h;ot`vYDOy{<0KjFPl0f-CODqw%Ix;!! 
zxS`>krmVF`_q<1BvAHX3r<*aY&7$0Z2vsVa>|1trP}5Vw*r~ec#U2)=?n*0m; z;z|!*{{D$Q#|WXIhzR=l5vixQ7X*K5A)#b?x!7ma^nW?b$7p5yFrvU_U<$2+*y4Z9 zqCn+`K=yx?Mc3AE|F62?|0=Ai0I221figNMjd0J|>@@Ef?bM%6-Q@i`wK8fwJ^a@U z3M3skG+`4GuR)<_?{%n37+GG9yT;xtC_14yAafSn;rK!adWL8JAfMi7?^%Xh+0s9l zsk);AIyV4JysN7VDZRG#D$%MEB;@R)7RYf}SeUC&s%6Q}H(*lp&U1WlG)ZLu9(8nd z)LFRpljCAzqobl;OG=^|8ynM-&ju5ZWT@`krYt`hvwt7wA%&jgeH3xM7C!m7gVo4{}iEmka^D4w3K0Fib{;u@TB~oqH1FdR8Mza01OSetG_?9 zrsmQCTG`N$8XD7Bd{p?x~i`!!yJp{~$UO?nAX=v=duIj9Z4iv6e8yzn3^cYZDMg3(sxMb=L(L!2Qu@cCt_HDgWq!o`j9eG}lqDQ&ZpmpZ=#)EVfvQPo&$rIn560ovnk|sbJN=ZopE{zJl$dXSG z)7B=_)zuyIM4(}lr32^TcUaXvvW885VNwAgm$o5oOViBkS#xtUnUHHFz{X+Dd}DoK zXBoXlv?t9$jZNe==FWag13}fj$T}|+6$y+=Pn6)l_h-_IEH2~Dk(0pw!`G~n1SV}= zCZxUe=tGcos#4xR*`4f=gZ(t@hmUcdNqVDw{}G(5G~HGxz1r)+ypd(APGM6@JSH{q zWxC-}XP8US1INMH%?^*%M-(+)c#c)Ju>_Gf0}et%`|L`2w%V1v;R}@+^WzC`bj<~r z4I~9g{$(*2T6Wjxc3@fVlB7v#LRJ>T_05fx6$}sypo+p3ZET1$sI9Ht*xp_S>9)g& zbHTp43l7iz@xxF8PioC$+w(C%;*CXKI&gp*6f7;b%!2F`!Y)f6D80wtO6^PEmM5J3 zhE3q<#Jz#;r8FnRSUM4zAf@Sq(yybJa)zWdZOO3JZOQ~W$DmNyU>@`23ss-%$y~l0 z_0Hcz2$x%rv7g>*?o0PB?faK5#PRtH)EICOWqVS|g`x z8jYn;>1UYPjVm_D6= zFZ|TMW4jjLGs}dte>8bC=lRtdtv5$_3$@hmxX~kC^+%*ap>Z^1H^cICKxD>OA$KT^ zY-UJs^4d3NC__@kn#$^Si{P_QZ_Y&J6VI}H`Mb3cED7z3)50csQ6Vh1m(_F4!YfAD zW=TSwm8CYl6wgVut7@CbHuajt(E0=1o}^ecS^LkQ_dFlXm|(jW;kkHBVcoflBf%dW z%3*t(CVz-sn;8 zuMbxh)vi0O8H)FnG&$NS1exKDZG#`gMsxR+-|B`8VXYo82^|j^kE2;HrJt`rN(QRq zAve#;3#|(V7F`4BVN>5(3 z;fh7aaGfu9++Oooez_8(^+fs*CDiVQ-ftSE*Om^;${ek_gMn+FB9nmEM9gHSl{RMN58KwvC_?!ch<@_5$Z97kd95YDbj}9}PTZ$~;7V z^MYInruwSe9xZ$+LFh;pyS$%*J!naz$d}s%Fri&uQ@o2m>sv0W_!PNDaogpkMi_F7+E>}wp;Ag+Q$D0>lZXt z+N^nM%mBe55z=P>_T?@Tm6YJMvP(-#H`!F;R9iiZlXDLv7mNjn6_3v!(_9zOn+hc3COck)ta1WJ)XJAfOH);Bj7 zN420UOA(^G2v2VW;CdV#9dRir6lG<5-dq7XY31f@F1x(^jzj=Cv!l#>=wrje6@yZ0 z-*Aq~1SoU?r2vM^Z*9o|_@1ql*0JHZxV9!`Z+{qB{|lQ^L>{0n0R91Dbhz571jt_O z^=o7-EG)pGL`O$I#KvY|V~YSYJ!L$-L*D>ez^pViHTx2|F<>wlAT9vkMg^c5h{f#Q z*RDv;d$Tj5>T#_LdZ2s-t980FC8eT*4{93#{LVUEK$Ht7;}-*2TP^qN@USXUU0ofh z7Kjd$XQU;Cncl?lRt$g52qYO^ZIl~LgXnD|Zqd#0zvWi=f8TtitX@bpJLflLiie6t zSO`gJ$AArVa51NF_qHA+Gf_jex&u~(-y7YG36yx?$w2BclO`0p!^4c?1UBwrD)DO~ z%2Vso>GQ5_FjpsqcwxF=n#{oV{2wrD)=rBZ-MVzU zx}o^sQ_wqIXXa`d5*AtY=-hX&&FSx@TN=S+cq7_2ZI0bi>!MrHdafR7HtYw$% zqRj<5G23@jPYe9N1bb3f*v#}Sv<9)&ZVS#`V1qjJ%9pmLZdHkAf7{YnV!Gion!&vd z3uk<@2Z^FOoVZsndNpE5f~$LF7HWx&>L|jOq;lepkv8pP-;ENrM04i`AKPRZNuf^7 z{iL&z5y*w&w0Q|7eLmwU>OW9o2_>Urhlpavpt(pvv>e)bs02Rk?+SlWZ=HTN-zW22 zOSr0sSw9uU*1n`gL0bC>K@(9P`5~IA3^jcg-KeK2gIWx!*o)uq#8xp#yDBqm$10Z| zZscX!j#Xe3t63R}tI?l6#VMZLRnV%(Xq<0&gO&Y!<55Duj_1-+!S{1Blm(Xp{r6vp zm4YZ2#LMll1v`?yesQB3{J44;^U~FVsc`?*Pvra+)f5iHhzewNGgGwOIovb60Ugrr zXFR1w{Xg0sI1kPrGZ;1D+LcqTz2mV%&dus?=VlBwJvua3l-+<;8~?W4{_gR`(JGLX zk%y&zTwL3BnBYPq*GfAaDTM0A1U>bnqKMp*TklZ~xGQ8nugjN!aK8WG0kRi*RdO;}OKWQaFO=fN zix+@HL;gK60V*#~F$;_5uV25enxW+~Jv=$gvl)(24iru}{!&f!YPt+}@>ipi4d_-N|t6}=q^1vS;UmgiVutZ^FF=-EJKyl81a?1 z+Vl1qhbszS`O~b!{n95C_R;QFnZzHCmL>4uUKJ58@7_kfal`5h3#VmuWPZ^3OD_>i zXyuD~b4Jq6s~2v9p%1Tn4D%A_&VKiM7shyHr}8xeR|FTpVs@>X|j)G<$`ON5~|)(b;w_I!j-*S z(4*oi@OQJGf2!QcxKAp#$bvb8+BW}iQnowfL-3>Q>N5Wilw8TrzY(A<Y&1X$fgDewmdHHdcL)mC#0oDkh7+DBvDG z+i?4aqgc_`$WV**AZvG;@LT(Gt46V#*f)X?_j0q3Wz62F^A3g4gTqI;tk4(JzN6mHon z(xL0QQ+i|ZYx|1!TMSXRN8D@fo+(4gQm49vc4s0TB@S#|dIWJFCzQ^&@{{Z@%El_I@WX6HcIXt$t+tTgx;KKp>; z;>C^B>>HitAu_%Zq#=0!kCTn7iiPBebeT3K2fcZ!43V_&I6HkQalte9!$mX>DAR2(HT!ZMnvEMK9P?M8?^I?d2PsUZ*M_}CV06ygQ>0Ou;|;6p&Aj? 
zaw59gxfXZfALF_vo|gzkPRtS7X;Y~Pf!sO{1)O*^O>XO8x(60SudGtUHIWFi()C@uaD2CO`PRZ8BnD$DQ6m zne{yQQglElYO++#hGj{XGpDIAdG27Rn}Zc^#mL$rKX725I3)IrnO|YpI#kaEon8S~ zSyQZffvO!_-6{xm@U0fcYdZ<7(HlC3fHxIRMz@`N&l2KfEiCZD?YyMQa(`f8F7BZGmw9W3)isKvBj)f!5P2S^re{0?6;SszCeJbz! zAd#}0*L-=LO~{o3!Nn$aMc)ZbyaOky#q#}sOM3LaeIWc2c1|XW*q@GL;95Z6$h(o83+3JY zM8*k9-k({>ye4&mLiam#7Z$5~jvn)|;!d+(!Z=w7uGd?3Q2RnM`rsxv%!q-ZH{C3< z^^xo5%Lu#P8p40l|x= zbE*m09uDVi4bWI)l_dgD-R_^33p*wXC1o0Q<%)AH)AQ%wfBpoN(FaWKm$Q0P#1=oB zKx-7h>ajGs16LvO0WDBpw8wK8UV0ea)g7s+6g9QAfL7@%TC%jZ&IIK}e-iH*tPn6S zr>D*x5tK@=ULiR-IlbiOeoRc<-5tYdZej7gqM|}{(0R3w82Bjoym^)MrFm@z@XMW@ zoogi=B!SH!nsk74{*09s(E2{05!h?Ba#RicNCzUr^l{g+HhgWGn_a_Z5QW`_)v}@S zd@<;q2c@-mu6y>IxwnlLaiN4@uIyVQL=|P;>fb~#e~A`)=(Z?MN4UNd(5ZSAJmHB z4RM-cDM`c@nVvb9KyJxvkJR?ZT~t^RYK_5N+|CWfQzgb2=;-Ku$^3n+6p%9g=~6xy zsGc}$MxKUv^oz)X9|?ILrtwo}Jo~F+N%v=o(r$&VN}^8dNBSO_{aD_K>k`73R$Xq_ zOlK}ferJbW2YHc|F(TXD{faDf&46r1=HS4jrB{~fl42WGyh2=^bG_A@QqWxVl5r7x zwZuV-0ls1qb2%fW$u2Ya%!TgYHTLeZZBFo=?LmG-VRU{+WnM zz}nBypIS>-lPmBKjm$H!&q@p5V771T7?Z38M~{|wB}RoR0wqF&qU$`9^KEo5J1HS` ziVhU?8_c%J2>0`|_Ey7tO7O4IuhUrPTo{MBKl7WvC{;UX+KJge9y0N&`zw$0*R@vmx^JdUS^8d zYXefQbp3&^=P21+J#gINjQ=9_IHT{OpoD~m{zy+pfqkt?^luOKQK#kE|@zxZZZal>$yN zqoyVi9JkfQz(s;)Z0>qApa9+nWi(ceC@5Jcr>210+@RCbwJt`A-vS_VwgQl&$ zMscs&@|W4ZU1Sa2m3RuOEwjeoCjCyAUvRLqLU3ZFc~+u^B0?V+oaiX9{(O1})bqfBB^p8m-$ zOM06vF7NCoTAs*5aX}`&)#fOopalIg@wmqZr3+pa{!69>Z@Y{-c$mYuMQ_o28F(X) z-l0=8kvw_Gx<*y{G==N)}AGhVZeo4DNW+c4zV#7+>LVD0Cq+Zt{#^Ar? zRgsLhmQCP46`UR&9fRWjC*{RL)libH7M~u&?>SNzuhz^T^4KwHmAsB+4jXA`s2VNB zN)`H~qEyrmY%5|X)tEzaa@yWZK1?HPKc|+AJ!cu4ddm{koguo9Pf;rzc{nZ9GcaRV zJy6qRK)-)kQ~UIe@`++A{`BdQVwwmbRGC53H#08}Gqn9q9CO#rN8H}L+=&p7qN1WeiDi3vY(8zndv}@AQv7rnQ7w>9 zkdTsY?Cxe|WMqg=aPJ>o?5Mtf52OKCfoIv!s@klqtlM!U5YIv9r`pQiECa|+^mvk( zo;?HDDgngoTSFvvfam~mY+E}6svrH*iMy_Tp#Cuq|2PngQD#rCZ-Wc zMK@PlySpCZ6(BJH@Bn((mxXCZou1E7A}(yhPD^Xv?J=$JYsn09V63t-(pPIPmkDST zH;HVY>|m(aEkg}jLs3Hs`HBDuBDCe>a3qzmq#}I?>8xe5Zn+9ixL=doCAu5fu!U-P zc~3bHUVM^?MHCuSq}k-v5#XZDX&1NSV?@6~>SDLQAdTSLa zGUwAi6;_J9&E(|}5r5O~8vZm|eTRx)D7h)z)4qbxpet=D#jL0H>zIce;^H~@l}wSZ z!sZxh>=nU?j;O1S+&m0vMyJH5KUzZ=SrYhQkb60qKhyoI&9U@T!aujNqpX0PuvXJP%o+7Ds0BdPn65^*vrb?J^^XnR~mn%WsFD zD!N8e>Nu^ubacZv;ISoKdFSspBI9g@;=(>C|HpsFs+Y}R<+!c6uy`YfPAiRms~yW$ z3Xg~DWo4pWpA6kemUUjQXNbIV$dQws?H12-@fbI{Z+oNtBDB~?kcxv%5AO^Kt@%Fl z>DKiJPO0rY=nhB7C8dUXczl9l(y9Dy*5ByKawB3|wynr8$nTS|CaZgI>~N(-ygpHV zS5!+-DxCaE`^#{d&qQ3q<3BU1sTNH_6W-lT-T&?iW#;FH0iDV$c#*ZvCGG^`!K0w{RRpupsg{twr(FAi_?PEe+B%(UFvai;s(j39rS$zFbM5W zw>3eduA`RdoS&038o(?7hU{u*02fNYrYkQmj|p1BcUJ@rX3(>91%#sfKD2kSFa>4c8-3~Y$2LgeBWOXMJvbMJV44@s5A&H5xay^- z6nOm_G9?$m79AN%;8CB+UT)?T6(a2ep)5s@+p02NGLv&v)f3Ofct83*yMf$}K6 z{Jqn50j?;SJ=H}ohd%)lZKgSQ#AU&zpVd@CNa={*0;S@x?|YV{m2dk>^tknjRdT20#Y`cfV|sm7NfJ0>YD-CjVu+FZbf6+>s}(*=ikXX z@8dCa6{vT=uT-t1!kIS79%4MBh7&j;CcVr;Qy`>$_JAW)(@Tqlv6`FZqv8p(Vn3ES`KylD}h`sexSw3yW6Emya=g^mT;z*5)0yD2dA~yfS zaNI}h7g_rm3FlLcUV&Jl|0;God4S*~j(ipS(w#O%$yU3+Bo~OF+Qu4hPjbWIy}hBp zu})zrk&&+e@92AoO#cS_p0 z7ip=fB4%XjhZ~08wEGs;HYE1@?UU||@7|q0##s$r__hykH2P5-IOol51KQmFG25a- z3J1w)`?jBE&z?SxB25U2=1n!y(Rr>U6#ZS7g_Fj@<(g4xul#5n9;YKD_4A)156fz* z!Wz&LNNNSz{5^x(b0AHlr3`JHPC+T7E)p8dKyl}J*v)u+ddlgts|~udQi60nJw50+ zwKdtoKSVs|x^gqwv&lMw@C|mqf1``*Yelu2tB7#ci?dD3Y1!}z^u?^;W-LB65w5`p zvr6$+cE<4{u<_dzUsAC(5+qtbE(ebEs5nwu@Bigcs6Fp*P`l{x*;E>Otz%Qip-u zazS~ottUsTDPWyHvsgZ2-nDz@dBa_Qn5k#-$Mw%&lOoOdTm;MhpBVq|-?WM{O#k%K{`pO8_y4=>ze=Az6U^#PzP+LGmlQ@~ zSzv5v$S*E#?0q3`Ud9)=1{A1^3f!0ep5WoipFe+m;Tn6nx=;-nCeczhp!H{slM~MR z`%1T{C&$NtOtKXq1uqZio?QOx4Tn~yaq#v)|BRxPDPetm>xaJtR}vBpbYM>V#9Lu$ 
z6NFIc(&iYY&40^qmP<)S_r3m}3r$+k|M!)Lz^`APi@I54{97#Z&l*Otah&{LuR;`# zu4c^+VvGI#6b;kED*ye8#dJ}$@(QC;m?If4S%hgBZ6v9f2gXi~T0yvX)qh4dd>K~R zv`D%AxP~}UzqXVdG?StktHw`gT%z>8x;OPpQZSlSLWxPU5%B_XeRak+!Zq3aAH(P> zH~H+0#G4#$k~k%VS<5Ch(_(_WFN~K8kd_$X#}VHv6+9 z8AO>P+&f-LhwZmktCYucNx}49i=4u&;)$D_J$GB~@}+}EdcKAvQTnrdN>58pR!)pk z>fQyaAN^jn$jF@0tE2VtA7{GnlDfE$sB7t{4{QA}H=>~=CwPDSdjg4mhmObR2u{dy z=JY}y%nQ6Ylu_J|(yS6Mz>n!L#e}`*owCOD$eeg*UL>@`)0xs7Ej^Pj?q*D%ep=yV zINAC_6>myhOAaGd*KeL_C|-b`h(vbcY!{+|czyHi&GY}YRL;YK%J2t-WnW92hCIml z-Yfl>^5b9)c`vH1Xk|&Q#Z@{18I-Pt5}{)>b_gFa&TC!rAb{3r=K!*nwkOwCy2p0C zl@yk7J^wCN{@F@C3<8%*LHFEZM1*#X-L{0sgq67v-}i5~-rF2htcG@X^QxZo~TIkGkZL9Avcdco(a-tgX#EBfWdC+_s2IE{3v=~3zo!S;s zRWOWT!#7X?g3<@_2LT<^PS;bl+3_{ob`;5kO$%X-i!0&;$_;0RfBZsSEtj0bhk1s! zZO{gLDOd4D{j}=#vjS++(roTolT$h7qeWP+8ZcPC>q}|!eQfG3$!O0 zXlkVg@G1W3-@cv8y{;a3F8Osa!*akJ>0 z{0_3T@IEB&^yR@LlhZ#V8G*jp>rO!2959?}bKm#ii_IEbGeDIeu7k5sJpIKPO%C-G z^$46=f+k)0+G8PC=wwn~k(^($*VpKtz}+HwEI&zUnK0BhFd$+6gHH@gNXt8=g_g1& zA)WN^%;Wd9wY^lENs%7gHNCj0AYr&Jl)4!F&TiJVaxvE^b+RmYadAd4KgHg~t%MO& zs%t9s4m{V6t7nUJ_1?SKbDf36`wR?oy?z+J&0jl@5T`uZ;RJSnVx)Zs2Xb_vQRRC? zBMN%Oj8P5Oig?$bTrcKy7mA|V{6rxGs%vQ13kf@}a~FJ~8uPsi>fXJ*L0;7>f)QL> zZ2~;2?}x@0nv3MAs81~Ol6jF0BQ=KnLc1mpDXw}*56;o_9I=E9UXab43ha4%eyYKK z_%PhK`zE(~V>%~iA`YIji)3ILzSh>PEYefBmR0Dl!#B(5;Ks!q)S5ljOxDrg+t{j} zeT9AK#rO4!0J=)>ZpZ@HN=|R^u(sN~4*C7w!PT0BT?Z@%0^>?O_1d~PiB)D`x!@4} zKr{`z;i)JanzhCiGA59Vo$Jqw#Ng(Sx)8jj#{Zl!w>*}cbqrBQmupn0!4i_vUCY4= z7=A+eYJ#z_!ui^O~bX>OjWPv&7(!8to4sqnd?w86$0 zynRyl^*~ntw!jCaULuGuV|K~v?F0GWRE7WR+xB6YTXu#Q-72)?ote|zWWpcBX`9T6LOt#{}hIPjGUk^wu ze_Eg#miXnEpgXvb#G!hS_#Nxq{VHH4v!(SB%}&Y+bwEq7ah7aqa;apH5@oU4H;9UdvF z6Qlja`7MXgtEG_E@6*0_OoX@dgyr~9SYWS4JJhA@cZFyO zZ1UH5an&lK>;xEDcQ2{d2IR<;#&ehpH7|J{Bj{`PyPXwn_Nfhn159vf36E&+0XKc8+5O4h@OLv9 z`1p(H>s2k>b3h zGPeca;=D#@`H(1)$2PpY28j>-#TplPdrg7%OW1$nM}kGU9{M5q@l}`yWFgZ9BWDBM zaPtYquF8d#`-XKmcHb+U#p@Hx@L+mp-`5VB6vSGGte4f8@n4!09Ac{6Hdlfgj$Nzv zJ=z_=HvaG|eB1CN>hP2M2YLOrjzImFv4?}Zo)%#t5`n~Js;f~4-qbs<6n0HYK5aI~ zb}57m5m6Gz7vcTq-Bf!lokrozfuy33@6YHLe1*q1%K{53*>44|f-(FO-6HH5~*TN^gDz6G^tFCBlM22&I@o;u-G;QnpYzW4*YHtIodQG}~4yLLGEiGz?9@F!^G$w}M=PZblvKl}i zPV&#D`g>ECNX6MNaW4_O|H9a$DzrAEa?|8cVK9y!69}m5;kJS~(Mn~-Msy@<4<9bF>3vM3+(CGIl`k(n)&C2P^1J;O zs;(ekShsLZoH5Jw^-DY*o%ahVS9+df5z9AX+WcEwrrS&-Cv$CTsaJ!_YUR7ebGJO} zyk8b&+#;hotzDGvQMqLVrrq=oS0HXFJTaEu^IAUR2t_;~4PatR~xQ!CQwJmUa|`|ZkZ&HOO3Q5{C=q%9mQWn$rzQ{ zYpa#uSS=!}$12h-+S zG%AV~lOjqFjJ}hn1SB>!2{C93#xT(;5spPehxa$Bi7uneKlhUjynjP%HCq-HCTPOp zEcP+$WaK3|2D|fVT=klglIU92r&97csg)Qo(_X{V6o|=cFrAjVcU+p)cvsCBbm7xb zzxvwXNy8e{aHa9~%Spe&OXn{fzbs$6Z07fgmS?M~S8ukx)@6ps4ju4$(4v{sHjT@) zV;ZkNxCy)0wRL*UNx+)ea{6>`!8A#4u``xkv@tNvbIrVB*Da==JTrq4`5q=2PpYMf zg1(i>9AeIoP9_@sWSqsFjTDAk7t^N5tyy~}L#>6pgnmy-Mq?H{qfpY5LkWY8%U;_u zxLp)^n4)YAk;08KyP$yzCIdTZtCvy4pPwbR1y@40GQ8dTZhPVq_*uH6 zb}U_wi_5J%Z{HNuY{opP+Zq0L;pT5fMIk75IQ*20h{Nt4aV^h~BVCjKM4F-VYot?0 z;h6Og-q-o%rrIkHi%T<#&&snkt1W01iiggGeio)>NxZ;ObhGYpKteWvRKyW4)HPMU`H3H7m3E`5f#YNZk&em1bcs1TI0? 
zrV=E3*Vu|xK1%ZDiwTqEdFO~f?v0@>rSCIP+mM3Zf~!s+@~2!%aZ( zks$4M@FE`QT9bCbm%22KSBo%h1&G(;G@=?=}1QJDUYR$E+tmpS{;$uegd~_4(XKg-pe+Pk;d> zauB%adzaG5NTBi?Mmja7HWaJd*FyT|Vq}TbZ3D+T(t|w4UT+TrUb1=OwXAbRY=V$M zokDD7tJb?U#HTz+_Uo@TH{^pt9=`Z7k9xZe?HSjp4P@-i)kJKE%HPH)MVH&r)+!<8 z*G|+O^sP)Mx|Tara@lNO9@H39_Fw5Ac3(=F1mhN+HQ6LawD`y9uxg5f`fDy9{{NaNulX;gv{vokmijeoIIy2Q}^K zHvP(-FeT#jg7z})`XTk(}3%_lY7?{Ak1q=-eixwyPvlp@JYd)j1 zUHj%Yx}`Q9AQ=|28sCo;+7oQt%S$^<-ZL6I5B?Kz5WpmOG2z#!cNEK=H!Bh{#gObt z8RXFKtCe7XXyj{zgMuo**fY(YH>uoy`GdM3RZDU^BwMrcl*;<1Q&~G184G^c*Nasa zmNL*1LC~6Is#$zHUri}x^7ANVwlb#U$57ScPK1c{GZhgB${6TmN6lJl=HdyrYMpYA z@YwEh`Q^Z`vF4rfJW<1*s28LYmj?#m9mtO$bV{YqFP~A{o-UbFmvH%Xb?+HOuk!wQ z^)auRnkZ5cM&-6lWyfd9wC^|o^+Nm^5=6{QM$ePa^?J7^*ulN}`e9oO^&^C*yqjVJ zqHdw^58AyfCcDCCN7-TA*PC(h(&9}`-OHFpZz=EpF;<)ULV?VCaY`${^7`}Ubh}kv z7NluOy;c-IH&@(=gjivq*)*}rF$#+X3Iq{8>i^*q@9 zyiA$fl3%RGLJs|4b6vq@UOIAT$YdE76H8V*qRHP-!X2HNv{8M+XXnDlA-OL{uc9 z^CRPRbM)zO`fV}f>tAJAc%Sau91fk9c!?Ux$M-CMi{x;|q`l-WI&+CQTJri~HaFtO z`g1_3Yd@-0|H#H0G5n)V67`{C-Pa5C6quSji7id@EIF2ysAur`!P)6`sYSkVJ8U?z zk6d=&Y)nPkZ#foWv;ns??@b*LhB@^L#_bE*MUHuwC$Anmf3GDO_rm1-^huHM`lDAP zEHUjR8sGTuSJCnXi9!^vTY+N4Z%X5JN)n!G@sw`z3MP1N8D(h~2ksO?SF24IpJ2j2 zN+kDf?HaX!w>ux*Lvz~^g*E%Xi9!E=t(|pTRNMFWK`;=Iln@DJC_!3ULK+DP>5^8E zZjcb^?gr_SMmhxP6p(HhKstubXV1MC^nQQwyq?$V;U5@g=A3=jS$nOu*E#3&PDk|h zU}a-9UspD~J)Ofa`%S#N@*TC>2r(p9wtHVAQEI9lwq+hwM6JpFv2o?g@U5Z}7ox(UiemFlT(1k4rxULGW@p+k z>JqMbJ;~F@sJ#}>2VWN)a%xjxH5_!}R)bjWbc=EGXq6^to{vVS6gp%`raFE^!4!O^ zFR-kfTuO?HAPokxOjQqtpr{x#e2HM`1ZQS4C z7qYJ$TwaX9&Sv8}kP#x=UCYh)eILiGss2`ZeZNpy9kKL%4(6)e&SK;{uNWvGMX`-x z!nY6}N+`N#5Sx)%+VQ^XaKpHeju8_JFQ-3B4SUHk0%LAW2dAMrtt&H*H^OQ8_BaWF z#drMU0a2=Oe2(@5_VRvkot`JI;~8p0`+NCT>~|kZVJzhk@iIfk<8Uo#d5$hhYuu;{ zHUBhJO}W-wb=megxk1Txx!f-Eyh-6iXMBgG2sGHR`P-)1n$MbYfqSMZFBe~``Q8FoSsbYiG3-*7lLb=~_1ZMchlh*md8N%w2vH59 z!(yNNVId)*^$-aks7?PuKq~bL>u(6G+m9bpANeNW#WFXprc!G@u6C*zX7Qkc4O5D! 
z7s{DzXxcxv`ZZQE_L)cRg}M3n2QH*t&^Vcyj`t%0RMzWKO_^NF1pM60s=#iS zZe}lKWU|R-EX1T>-t+1EfTO{_os6-JfF2*%64AsciEnt4ZFL^yRwoT~-y^`HMe>dw zV{!Ug4V%8@avfbvM{O`J%e*p9gVT<%%SZ&oO33ih#yWE>^JQih<9yo#LnhWDZZnc6nsYsGw zk&PF{ZpadFnb3KD@10;7%F(7oKYEeigMYvDlbdVJR9JrW&j!QNF&McsuD%@>VTt$W zi*wa^Z<#$??CTAlXqU=MT)(0jNlR6zET$Q9*z>(As377J=>PW(eh_zZ3L6$85)J-p zHUFtXBpS2oX&?@N?-5b#c29fZ2E}IOU-M&^(`Bs!M>$PKg86u+~pB&o$9>a$h z%nk|V<;dRmh##{W&ECY%bS!h$sw`g7ps-_dBsVZl4NMXA4vUR_D{%d#6*PB#SCGUB zxvZ)4ealgkwvyxwIUK(v1_T65OAB5NCa15ny?ypq@d0X*q3kLec#QqCt1I`lsl$3R zTrV%cGa2(Am;Wvz4?Z$D7dw(?UYk?yyEM&}p&=RS!T%V)pGxO?EHuy7BdQg`Q09H7 zrEiE)I&mXqg!L)?GlZX_DiPJl*oC~UBbS?NJSA)cPU6--fcy z>;L(5Ixqc?@bJ5+t(jcGyjU#y;}hcPwXZOt+gL`1lG+zLO+|* zms+7?TtIKywqQfTG4FV_6S8>5!j@#&a&H&D6ph(6;}cuFz|^<9#K4p0c}{h<&hD_y zE?Z)D@?@s++2sp9?U|DIH!ruhh9Hy-^J=baUk$a={dY;8Yh4|&qi>>|V_q}jJs}*( z?fY_$HZ;QIgPcYAe)nH?Wg_IKFf%^9^4U8GM?zUm zvJcUZzae>PruD|Okz32-+fgiCeHyM$(UPmHx8IV6km_gZ{HRF24?%%Fx4B?M%yD5D zeRDYM_pxpm`C_g4g_|gMnc4MfZxZ+@DivoK%jE0vh}^h?xHCd)zdH7A^^o!oT70u8 zZ&ip&ivXFlMhkPNYlTO<>>Yg>oy{qKnl9e9okv(tT+1ScxKknHNS{O`=gDFC%xk;7 z+~G!FQqPnke|n`s?qGNJ>JPNx6>qPakO>^I%gELj1ZjAa89Nigc)5cIOvtb@EsyVN zX8U7qkwR)_4WcHWhrWRs2rFOi+_GT zE_og=8Im_%J`~EL2~izF$Id2hzG9T=Zs=0v9D&38Z{9!m zC8Ey1gi-%Eo4?!1F09?H&{;tL$WDlDZ0dute_*S<75<%G0=Gx}?Y!FVah-F1D@tz~_uBV$YtPK4N{3XxGQ^xCq8se=2tW zYvE9-`TVbnR*qUvDRglHwr~oqSkXsc=}u3;Wn8l8Si9*qpQ#nx$b_%AZyyZHem&gd z6BNZTCqo>g!hrT^_lGfqCG~dVR{kS(4MC_^6|}s(e;l!~n4S5#6(=qG>ta)9yl1CK z5zdacz{8y%mxKa@ZhsodPva!~taqV77l#cwneaZ^gzf({dC6#l331;OIL7^OWRMxO z+F82ru&pC=J?NAgNlYaBtYYg`Jd)j<6{ z-XgI2=tz7`r&Sy`{@fyrszxnHkh}JMY&luVD@?H;F1Xm<3lM~NBnzbPq?7L(iPu^M zXGRe)xvi^P4%&<2OOe$SSvgc&-BGWLxxGp48FlBi3{P{Xl(GG0`)5Qj~kcSEqV-f02k^C=dQNkei+T0*EGS1;z1=rz+)hF%HnKB;-9GPh2 zTlf0BSYu8+PifATiC_2e2p=!q?Q~_2JT>j)UEA^Oh*U5M3b~vU?yh?xcJ4}_QBt+0 z7{EZro}bh{F6wY(j@4MlEj^Q8M)$(QElRzUmpwVhLq=bq_l`C{3aoxNc(PKLxbF+K zq2Qu?E#o3lH}^978qswYOs~%r70xl&9i-S&Uk!}O!S3#SOzd-J%ekmqKRPrPMpjan zbRsCL)8=HG$ZtV7cxyzi?*09)-~e4~Jc#N?$vRyo;pQ1fOQXB`6n(M{IdS?kfpReu zodxm}qQco{!MAhlS@=6PF55Lm>a9>3^aLq_&#jR*cJ9o1-AUJ%mkUFs>Y@$Ib6D@^ z8jr8Q+;p29pn+xyoGBYB$pF1SWWSh;(8uLpJP1kYkyDWe+=C$bZQ85enV~fWVN20T zI{Je~C4*tjTz8etWw1I%ly=XDZnqOB2{R8z+Pn5TBxIsZl$%CcriZdm$j zP&VpP>FIvqFy;emgPT;*Z%1(Dj!j}#BWZnc^>8F9eFAG^3)Pckj*h))%oXc}Qmaftkzt1E9dySxT(gBuzDNJD# z|3qaFi;R&v{;YTmZGDrg5gTu^#%C`wZ*26H%v2IW3YKuK)q8d~OnI|Q=KRL2?8!2Z zYA&t?TqurYmrnDj$&K3{(B=C47RnewX=?s6^O$iZcSM1bwg~3)Oowh_e7AFr<&3XZ zvw=^AxIaR*xTA$jVTeZWH@*aLKkJnbd~h+&1;5yNfX6xRXs906YqxfV!iooOg(uS}tmzsp7F8tlT!-$<;l04q*w4}((>W>@eAX<{M7g@+fa%-%E4=%N8zESqkhse+O}%G}9ve*ZB#-^B z@~ib6f70x?HT|wG(>LqlrE^F&cyUZk9FJ@Gg#&EDOiuR^LHq0;rCH?A3>#iBV9k~-5sZqocU_cJ~W=tz2WJ|kYQdkQN=H-M87pl}*xnG^t4Hsmr|ReBD$P4JvXf2;MbVcrc4{b% ze+_1un3c3=Zg(dudXToZHe@!1k$c{nn*_t#_a=DubQfyG<|(XF#=b!|(0|V1ky&y? 
zbXaPyh@vulz2-ULFO`2XG>+38^nwugj<{FyXfIpw%9Audv)anG@&FAV+2SLM4FWQ z0l6)1Nr|)_Bk?E+WfX}U`&#zY#P=$EZeD63Mb(eP4#!)d?P~dmtXO+`y`G=qo?j;+E$8++9C02tauq}6mhb_GyrQ0 zjUPU*yxPynLjUEO*6HbG)?Zx#G(zSh?DWAJCN)(YG-593^ZIvOoQ>2Bg*lXyu6I=O z_s>RHBG-x8qx+=$bP+6aT~A!IiR;`b*rh)gX(LP|IR$y8j93n33ruxyY>rO_+_r3g z_aRWL;po(2QS*z8VvSH=k?Ihi|nQ(lFkLW2Ki3K z3kW0+7Yu!V7G#z$$Q=C|a!{1GuG<#w5pxMGebzptN=gA)Seb#My>TEH$56u+Z-OoDS@n4xd%TSBOM7|ZIZ$IJqB=?@|(Hyp= zXp)nu-N>d3PvD~k|8`D=u9wD7a;EWge+Kkfwe{!{@?*4~>dPrJ$2G4p?8MKU1&w=f zs&ilZch{t@QLLMuq5Y0#j+gfA!Z|KacfM0qbJI+YT6Y}UgMuOSgKTRcZEZT!)j@_gBLV`XCsDjyqGsfomY3A#Bfdut=XK;`Dp_QCbXKMVtP%$1SCnVWs3)HYs- z(HVoo!^0DAuW}M(Uj;k_0lFsVpb=s0OUfJi(Rn(0d?LJAO_h*Y?@r?^|H)y6!Br0_ z3^_!SBrE@EW249Ij?RZdj*hb*m((I0;|~rX`MzvHB*x3lC}x3I7QnVP^gMr`89(h3wZ+YAf3>t~xHgmYMUPEX!Y2HatKzaBCzEqwLi9GTp=T+Ih{7N&_D zM<18=v+q++j_wtNRTI=LOw+}9ib~Y z;qUa>Mbz8*+JeT?H(dpatewTeo~)wyFn!CxyY+RoUbBJE&($>!luGw*%V5!{G%J}g zP}pP9h>kp6Jf&SDMcx(z1cKrCbNfeVsl z6cinA&_GIbpp!oLzyIZU3eQ$(0dnfy{xqh!TZTTJ6^lHsGNf-PgW?4w2~y21f!3=P zv6{VD+~5>2j;KGmAB;-lp!O)|Tr9wzxuc!LtCv1WU}f}0HEl|ZR-v}TvFhq!c{r~g zYep%4$?W)~mCKvQ3=E4g`18zH_X0Fht%>1wFA#zIru%JJ-WU?UCL({{eSIJp@G_)LdhjWM|JOo}Go zqlV#L-H2sD-1l0TwF07frJp{Zir@!T-VCK>^VxmX++_QJAHPmxPY(*7kyWK7v?Udg zTda!tgvBq(?mB*a=P!44>Je=vG~_~O=oi=Wbjog9#aMrTfj6xn7Q*qhk4|?#Q$y$; zh1k7z{iU!|n%oQem&PBvkGjklT^~Zq^WThh;%CpyNHa#bh#(^W@{v1r=FD?#@LaMF z<7bSYJX=#XKcu@mNLJ;FbYG?*Gjj*QH$Yv2&ygF|0BUDTEo@BVnwqfcMZMwiB0YJ` z<*V+q#b>ev3QcDX%1r^GiG?qjNwS_JTfaZOf4uK;wgKBnHw;c&FBk5vVMZqYZ~IxIzJt;j*=;};jlNzADX^!mr+8bS*w*lmLy^a-t#eZ7XOMWs_Mke=6FD)rS-OF~>J zvxF#HInX(8S`PE$4d=z6oKL;SkT8>@oe^cl=oh5ahf=h^EbhB}{hO>&dtW83iTmr6YLC_y0((o`?L zZckzqk!<9GiukWmY#(C%kgT>UX-N>#@}&d~VYtj@NJWqF z*f+mq#a9*hmVQ)Bj<4LHIgOAxzYL!8%qXKGpOQ6+jt!rI$s!LUZu7FFUC}7nV-Z@F zmT-a*z9z58VCz;&iJ@J61yg_QcG@>`oBY2xh|8-AlBh#8*bWs0?vi2Nt+VN<`(fng zXN4lPmZN@w`xS-kzxKp%HfM=-YMNNLWH28-^9?&gC}z)9z7QHxLFMUqzsOTZK5MqHurm7*`LOb+DvjO#%RO;R?La|1T4@qEY z9@AY&&gGE2LfdZaepPHhc{A-lfo^~k%80$5i%@hBLSiFzWUPkU- zIpy)4agkJH>f@hERdyX3bO|nwh~K_NqE>EgwoE!5s^4W;>glZy;w0_#ih_5A1oo34 zDf2nFo}afD>@2pTH|Oh*DT=J_(~bytBsy@HwxQMfxLr-!58GoC{iq+mTXv)dDZl&( znOZkPhN$X5+`mk&m+}2=QU!(meQEGTtS+V_qj9-zg$Et!DLP1Wj%Q-ce*~-17rwxVlwS{ zcOzsc3OPi$ce`||f~*SXC4*6NuC5_xl*t$EmM+;B@_3eq-PZ3qlT&wx^ywPrUCl?n zR^du@+{FHQVem9Gh|KeLF10JAL^_hsu-uO2qYBr`9}L&!v*EA)t);7+=8z*~$A*LS z(L?aLBSPaVJof?z6w4XaRtch>MNffTwqe;Da>dP8h>@6qSO;m?4I7uE#R!n!u93wMOLD)RJMKjunXAG6)WlPFTcwqJB&WyrCPZ90B05#v}Y}br#YX>SNThShAlx#F# za6?Vam)qxNS1x^SvC`km{nuqiPy%O4INI_GlMAMcuH{VOxnogut6Jh)we4-+0BxqT zRrs~%Z){#z6MCML|Ew=@ zC*G)v=+)?M%JYB(Q@p)hQqtSd7j;z?JLdhlHi;fw3yirm8zQNu_`I~-@ZCXk(4^`_kTPQRLsQt`IKXZuqBQ8qX z@nsCy69ML9*G~1No(eJ>w*L3871)}oYxSR0;q!2dJCVhobrtG~bU4&=5#P?%QW9Uh z@E23K+9pN}iH*d0v3-N>b&m$3A+ePyzp6o}O?h4AYQ%sN30(Ki=}nVqj<_<2+`G|# zEexptSS5g7M{bd}NiOUc4ilfx#Yy31b%p#tm(n5PCGx9sKBk(2$JI+#$wh`p_wN(= z)Os_8o0)#rdHuBGa69hG8^yO2|G4g&DFOY8pPlQUP|6=+l$%mgouu66gwN9dN_g>v zachp^dH~bU{^zUM?nwy>SIvi<2$KSxm5OAYwhP+<@B2*WOg4LJ!PI;nQu=TEJ9bF|45R)=)T$g5RXg__V7NR!>6u`)x7QpL(iu9ya&eE<(ZKm z7ruT~uS2M!Ti^Kw>qouW?fp}$nB(J{xEU2V$AT1n&o40h%NpM73_apdl{qHw%0}tY z?j@#xKJojPTRg)qOvl#?Tf(tvmeQnDj(9v`QZ<}L@GZ&@T=k7z5DWP>KdKboh^ z|LMdFP`|OQ z)ljmop+|Q$$i3;+{-o=5h4?@VXdvgZNA;h6rklEMBS50JoV4thcU_(z)&Xi$oz(SR z94i{XWV8Oe8$%ASlP2gGtNi)m?;9xo?&@9}|3B^|`J)j-D7MyCtk*^nd{thC?YUUc zjR}zD!f-hw9)yd7i zihsqh{@o8(dGNXcPxEZ@f9xGC|6z3r&VNOqKaT$TT&MrMvuAtbgVdil)6f!K?+Fp! 
zOakp97DmucJ@Jf=0SBH^_guU-Bl3};7b6e37}<}83i)e$ui}JtER=nv-~fP+6agfL zO2(l=3YSsa&-C89qU{fOVgx*H?b|-N{`?0yph{T$-J38W(m?>`SeEsZ)IY6lyraGS z*9Pmgg-*T%1fYSgQp2pnKc0WogrCQ*lLXXEFU~beWUjWiqJDC?)#r-?Z;&MZ_C5*D z-%DI^iK;)A+P_Dh^Cm0eBmyu+%%Sa})6xqcsA44#Z0}xeIZ^446p(92%()0Qv_hXfk3bpv)OeLFBNyqQsG5ET{eM@i_&}qfPtzW-p zGom$hrmAg((qx&LnPXVrVp}bBzuBH`tc%G1$J9Al>h&^PMTUJOL8N@WLrPje`(**> z)y<3YonQg2y%pO{a0tF4X=ntDZlI-T1@xwe^VQlgT(*&bm)~pJ@1iV&Up)lfe@>7)w)A8iM`iXnW52Y=LyykG5bI zaGEj!45HzDqCp>=&9gzia~wbrd(#m{0SCeH97Dr_3BADqz`6kPPT}_8_S|=sBJIYU zphO^>2?t~471m2MxSYoTAQK$ahIyyR<=hb0*ryG@_mLI5WMQgcm z0>Ts-fc*i>Cpq5p75W$!hP_z}lU6L$>;VippyES8Mdj_ZS+%Ti*yDzX1m|KQ!Xa6O zwYzr##s#i42jDd&mWvVqCL&FZJD)~+P7N@HPXShI{un$jaB+4F$T1#p`U;>4?(~?s zgo{C%?CN2}fYMh5&_PFY7_MR*4=H0Mncw}S<^{AmAgCB)wKP6D3P<0-!Bi=)emy;; z1%BO(BLhdR0XF1uyC`o{8}9)V0)R4Q=j6OOJKBLW+-5E>4#BytLN+IpxK!RwSnKd8OWi1IUFLEEn+>yJoOOb29HDF0g>)7r zQ`WPFcMrtV-_~KJU$irQo=3+uE2{D_aYwJLG8uLHe(X^q{YpXdW6Tol1U2dFFpS5z z@FvT4>=ukCXpCpu_}juxk=YEdgoFeT$9Tla8Ef`oO^yA%U6uM7fGjQm#Ugti5^&ET z95x6v<$$wZY9DS+@$CPUhm(;irXBr(^X&j*`VSdISj3F)l_$;m$ z&I`O;TwF&x!LDfE;O7^pv$h2hlLLKtIP(yw&;pcY7m-#j=S1Q+9Njc1 zjRogD7U-dAIxOC=-;FE;zJ3GF4YZtZMoDtLa9CYiqoJo?0;eC05w33!NO6EncC&JY zdmIRB0@bAK{QQixG>^}p$y^Rfljjx|th^hr{!p#3SD>-M2jG}(FlCbfstJfp0V1Rh zAW^ATS$e{y=z%aYP#;VKVpM<%l!i|B>=_`L;QNP^8 z{m$ci-X7iElHCR@;GWL;M7!y#l;HY$UO?OfEE@#CTN(t2Eih+p19l-$vT8FZg(Jb{ zthu}sy@8r8;5h>KMe_dM2#(vx%#4bK1;-8Wun>Ba`7q!?8tC!X0$H4x9s@Wx1xWn6 zUci#w-Ji$MYg7vXNFR`s1A4tck_zs~!1&-74j!JZ;OOAUh$sk-KsNguztQrJ{b;of zT=z~NNcI4`+pdqg550Sqv^Kk}255guO88PzQi4fq+FC%I3rN_#Qc^Np&v`bf?&Rob zf4)Bg@=k+@ni z!!8V_JCH68=!=6te+oG}^H^9|ByigW0SY1F?Nv^hg2dz#9IQn48^6T- zf;=~(WccUwerHB8fmIn$`kS?_3CsloYzACKBNsz~%vI z<)6dD*tobt>gox_*3%$T!ecWW*9pR^dYxk=m}3$zUjlEM9WzYMC@c)tI$u?S>!X9Y zMg+5;_oWK(O9y@nP*?%lP)DHNERuZ_lYl@>O^pbMjv)ZGv|`gKE+9e+kBh+CfF3l! z(Ew5yzNUTS(FFjkQE_p_0ifH0WfeSp0BYp)ZW<%E*%5|uY|Re2fqS@HXzE*#Kg2dT8!kjzczyN@L_lI#U2$%MS>^~BqK=Sh%W4sr7J^LxzA9S^(~NFc)V4ZveU0C)mUKm!*CVbE|H zY;Y{VVnEFquo|_=5+0yw-8%erGCopoFu4%*R1HK}!jg*Nwq;e2VF3vcUM=**JpRAr{psz72Jw%2f^IsXap!+o9|W+r2&WyL=Bc)_z!S~+qZHDL27!Rz+Gvk zXm5WM!h5S>YkeJA`($rLK~WJz1YI7bydeoNLI4;1@`Y|;8EqCsJ}Mxi8%V@1_xkls zehZ78_cQ%RNA{ptVNX59{Vf-RNPag1I!K5H0RmEp0TLL%Z2`#xh~g_KDZyte@YQGr zttEi_7&T!t0EZ7v{^fCtRvRb`GsXAp?eF(bPImh<)Egz}GRK;7Q2|vs01&E=C&55L znFHkx03%1IranH{oazE%`YNEL2!Vmg2Nb1KN=gPQW!!)sA0XHP%~mp2)`+ZRmj`Y@ zMi>E90i5Z%nfdt}UX0ld4auM?l2uoiR8oqz-~+ndAPNH^CGe%$_E2(AJP;rb&l1D~ z=Inr$n%YFsVO&Cjq^T)A1qH>~K}7=vIk|^nl0)kqZ4eR1#>Rl=IIbI<9}FS~!2&>h z%BiYy={;m%U;rEC04z}uDZT;CJ^(cUryx5oj~omQsN(@VE4STdI$$1DOuXk=i{EH? 
[GIT binary patch data (PNG image) omitted]
zFiBw!VFz{<1?d-;>o!+K=y|~wVUk#K_Gh*+QZd;KzZNSJFWG4}P;>FX`29hM$^LR-^hJ_F}6A=?eq=a2G|<>GpN{|_~{ BGcy1H literal 0 HcmV?d00001 diff --git a/bridges/docs/running-relayer.md b/bridges/docs/running-relayer.md new file mode 100644 index 00000000000..710810a476e --- /dev/null +++ b/bridges/docs/running-relayer.md @@ -0,0 +1,343 @@ +# Running your own bridge relayer + +:warning: :construction: Please read the [Disclaimer](#disclaimer) section first :construction: :warning: + +## Disclaimer + +There are several things you should know before running your own relayer: + +- initial bridge version (we call it bridges v1) supports any number of relayers, but **there's no guaranteed +compensation** for running a relayer and/or submitting valid bridge transactions. Most probably you'll end up +spending more funds than getting from rewards - please accept this fact; + +- even if your relayer has managed to submit a valid bridge transaction that has been included into the bridge +hub block, there's no guarantee that you will be able to claim your compensation for that transaction. That's +because compensations are paid from the account, controlled by relay chain governance and it could have no funds +to compensate your useful actions. We'll be working on a proper process to resupply it on-time, but we can't +provide any guarantee until that process is well established. + +## A Brief Introduction into Relayers and our Compensations Scheme + +Omitting details, relayer is an offchain process that is connected to both bridged chains. It looks at the +outbound bridge messages queue and submits message delivery transactions to the target chain. There's a lot +of details behind that simple phrase - you could find more info in the +[High-Level Bridge Overview](./high-level-overview.md) document. + +Reward that is paid to relayer has two parts. The first part static and is controlled by the governance. +It is rather small initially - e.g. you need to deliver `10_000` Kusama -> Polkadot messages to gain single +KSM token. + +The other reward part is dynamic. So to deliver an XCM message from one BridgeHub to another, we'll need to +submit two transactions on different chains. Every transaction has its cost, which is: + +- dynamic, because e.g. message size can change and/or fee factor of the target chain may change; + +- quite large, because those transactions are quite heavy (mostly in terms of size, not weight). + +We are compensating the cost of **valid**, **minimal** and **useful** bridge-related transactions to +relayer, that has submitted such transaction. Valid here means that the transaction doesn't fail. Minimal +means that all data within transaction call is actually required for the transaction to succeed. Useful +means that all supplied data in transaction is new and yet unknown to the target chain. + +We have implemented a relayer that is able to craft such transactions. The rest of document contains a detailed +information on how to deploy this software on your own node. + +## Relayers Concurrency + +As it has been said above, we are not compensating cost of transactions that are not **useful**. For +example, if message `100` has already been delivered from Kusama Bridge Hub to Polkadot Bridge Hub, then another +transaction that delivers the same message `100` won't be **useful**. Hence, no compensation to relayer that +has submitted that second transaction. + +But what if there are several relayers running? 
+They notice the same queued message `100` and
+simultaneously submit identical message delivery transactions. You may expect that there'll be one lucky
+relayer, whose transaction wins the "race" and who will receive the compensation and reward. And
+there'll be several other relayers, losing some funds on their unuseful transactions.
+
+But actually, we have a solution that invalidates transactions of "unlucky" relayers before they are
+included into the block. So at least you may be sure that you won't waste your funds on duplicate transactions.
+
+<details>
+<summary>Some details?</summary>
+
+All **unuseful** transactions are rejected by our
+[transaction extension](https://github.com/paritytech/polkadot-sdk/blob/master/bridges/bin/runtime-common/src/refund_relayer_extension.rs),
+which also handles transaction fee compensations. You may find more info on unuseful (aka obsolete) transactions
+by lurking in the code.
+
+We also have a WiP prototype of a relayer coordination protocol, where relayers will get some guarantee
+that their transactions will be prioritized over other relayers' transactions at their assigned slots.
+That is planned for a future version of the bridge and the progress is
+[tracked here](https://github.com/paritytech/parity-bridges-common/issues/2486).
+
+</details>
+ +## Prerequisites + +Let's focus on the bridge between Polkadot and Kusama Bridge Hubs. Let's also assume that we want to start +a relayer that "serves" an initial lane [`0x00000001`](https://github.com/polkadot-fellows/runtimes/blob/9ce1bbbbcd7843b3c76ba4d43c036bc311959e9f/system-parachains/bridge-hubs/bridge-hub-kusama/src/bridge_to_polkadot_config.rs#L54). + +
+<details>
+<summary>Lane?</summary>
+
+Think of a lane as a queue of messages that need to be delivered to the other/bridged chain. The lane is
+bidirectional, meaning that there are four "endpoints". Two "outbound" endpoints (one at every chain) contain
+messages that need to be delivered to the bridged chain. Two "inbound" endpoints accept messages from the bridged
+chain and also remember the relayer who has delivered the message(s), to reward it later.
+
+</details>
+
+The same steps may be performed for other lanes and bridges as well - you'll just need to change several parameters.
+
+So to start your relayer instance, you'll need to prepare:
+
+- an address of a ws/wss RPC endpoint of the Kusama relay chain;
+
+- an address of a ws/wss RPC endpoint of the Polkadot relay chain;
+
+- an address of a ws/wss RPC endpoint of the Kusama Bridge Hub chain;
+
+- an address of a ws/wss RPC endpoint of the Polkadot Bridge Hub chain;
+
+- an account on Kusama Bridge Hub;
+
+- an account on Polkadot Bridge Hub.
+
+For RPC endpoints, you could start your own nodes, or use some public community nodes. The nodes are not
+required to be archive nodes or to provide access to insecure RPC calls.
+
+To create an account on the Bridge Hubs, you could use the XCM teleport functionality. E.g. if you have an account on
+the relay chain, you could use the `teleportAssets` call of `xcmPallet` and send asset
+`V3 { id: Concrete(0, Here), Fungible: <amount> }` to beneficiary `V3(0, X1(AccountId32(<account-id>)))`
+on destination `V3(0, X1(Parachain(1002)))`. To estimate the amounts you need, please refer to the [Costs](#costs)
+section of the document.
+
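+The same teleport can be scripted instead of using the UI. Below is a minimal sketch using
+`@polkadot/api` - the endpoint, the seed and the amount are placeholders you need to replace, and the
+exact call shape should always be verified against the live chain metadata:
+
+```ts
+// A sketch (not part of the relayer): teleport funds from the Kusama relay chain
+// to the Kusama Bridge Hub (parachain 1002). Endpoint, seed and amount are placeholders.
+import { ApiPromise, WsProvider } from '@polkadot/api';
+import { Keyring } from '@polkadot/keyring';
+
+async function teleportToBridgeHub(): Promise<void> {
+  const api = await ApiPromise.create({ provider: new WsProvider('wss://your-kusama-node') });
+  const relayer = new Keyring({ type: 'sr25519' }).addFromUri('<your seed>');
+
+  const dest = { V3: { parents: 0, interior: { X1: { Parachain: 1002 } } } };
+  const beneficiary = {
+    V3: { parents: 0, interior: { X1: { AccountId32: { network: null, id: relayer.addressRaw } } } },
+  };
+  const assets = {
+    V3: [{
+      id: { Concrete: { parents: 0, interior: 'Here' } },
+      fun: { Fungible: 1_000_000_000_000 }, // arbitrary example amount - see the Costs section
+    }],
+  };
+
+  // the `teleportAssets(dest, beneficiary, assets, feeAssetItem)` call described above
+  await api.tx.xcmPallet.teleportAssets(dest, beneficiary, assets, 0).signAndSend(relayer);
+}
+
+teleportToBridgeHub().catch(console.error);
+```
+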
+## Registering your Relayer Account (Optional, But Please Read)
+
+Bridge transactions are quite heavy and expensive. We want to minimize the block space that can be occupied by
+invalid bridge transactions and to prioritize valid transactions over invalid ones. That is achieved by **optional**
+relayer registration. Transactions signed by relayers with an active registration gain a huge priority boost.
+In exchange, such relayers may be slashed if they submit an **invalid** or **non-minimal** transaction.
+
+Transactions signed by relayers **without** an active registration, on the other hand, receive no priority
+boost. It means that if there is an active registered relayer, most likely all transactions from unregistered
+relayers will be counted as **unuseful** and not included into the block, so an unregistered relayer won't get
+any reward for its operations.
+
+Before registering, you should know several things about your funds:
+
+- to register, you need to hold a significant amount of funds on your relayer account. As of now, it is
+  [100 KSM](https://github.com/polkadot-fellows/runtimes/blob/9ce1bbbbcd7843b3c76ba4d43c036bc311959e9f/system-parachains/bridge-hubs/bridge-hub-kusama/src/bridge_to_polkadot_config.rs#L71C14-L71C43)
+  for registration on Kusama Bridge Hub and
+  [500 DOT](https://github.com/polkadot-fellows/runtimes/blob/9ce1bbbbcd7843b3c76ba4d43c036bc311959e9f/system-parachains/bridge-hubs/bridge-hub-polkadot/src/bridge_to_kusama_config.rs#L71C14-L71C43)
+  for registration on Polkadot Bridge Hub;
+
+- when you are registered, those funds are reserved on the relayer account and you can't transfer them.
+
+The registration itself has three states: active, inactive or expired. Initially, it is active, meaning that all
+your transactions that are **validated** on top of a block where it is active, get a priority boost. The registration
+becomes expired when the block with the number you have specified during registration is "mined". It is the
+`validTill` parameter of the `register` call (see below). After that `validTill` block, you may unregister and get
+your reserved funds back. There's also an intermediate point between those blocks - it is the `validTill - LEASE`,
+where `LEASE` is a chain constant, controlled by the governance. Initially it is set to `300` blocks.
+All your transactions **validated** between the `validTill - LEASE` and `validTill` blocks do not get the
+priority boost. Also, it is forbidden to specify a `validTill` such that `validTill - currentBlock` is less
+than the `LEASE`.
+
+<details>
+<summary>Example?</summary>
+
+| Bridge Hub Block  | Registration State | Comment                                                 |
+| ----------------- | ------------------ | ------------------------------------------------------- |
+| 100               | Active             | You have submitted a tx with the `register(1000)` call  |
+| 101               | Active             | Your message delivery transactions are boosted          |
+| 102               | Active             | Your message delivery transactions are boosted          |
+| ...               | Active             | Your message delivery transactions are boosted          |
+| 700               | Inactive           | Your message delivery transactions are not boosted      |
+| 701               | Inactive           | Your message delivery transactions are not boosted      |
+| ...               | Inactive           | Your message delivery transactions are not boosted      |
+| 1000              | Expired            | You may submit a tx with the `deregister` call          |
+
+</details>
+
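+You can also check your current registration state from a script. The sketch below assumes that the
+`bridgeRelayers` pallet exposes a `registeredRelayers` storage map with a `validTill` field and that
+`LEASE` is `300` - please verify both against the live chain metadata:
+
+```ts
+// A sketch: compute the registration state (active/inactive/expired) of a relayer account.
+import { ApiPromise, WsProvider } from '@polkadot/api';
+
+const LEASE = 300; // chain constant, controlled by the governance
+
+async function registrationState(address: string): Promise<void> {
+  const api = await ApiPromise.create({
+    provider: new WsProvider('wss://kusama-bridge-hub-rpc.polkadot.io'),
+  });
+
+  const current = (await api.query.system.number()).toNumber();
+  // assumed storage item name - check the metadata of the actual runtime
+  const registration = await api.query.bridgeRelayers.registeredRelayers(address);
+
+  if (registration.isEmpty) {
+    console.log('not registered');
+  } else {
+    const { validTill } = registration.toJSON() as { validTill: number };
+    if (current >= validTill) console.log('expired - you may deregister');
+    else if (current >= validTill - LEASE) console.log('inactive - no priority boost');
+    else console.log(`active - boosted until block ${validTill - LEASE}`);
+  }
+}
+
+registrationState('<your relayer address>').catch(console.error);
+```
+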
+So once you have enough funds on your account and have selected the `validTill` parameter value, you
+could use the Polkadot JS Apps to submit an extrinsic. If you want a priority boost for your transactions
+on the Kusama Bridge Hub, open the
+[Polkadot JS Apps](https://polkadot.js.org/apps/?rpc=wss%3A%2F%2Fkusama-bridge-hub-rpc.polkadot.io#/extrinsics)
+and submit the `register` extrinsic from the `bridgeRelayers` pallet:
+
+![Register Extrinsic](./bridge-relayers-register.png)
+
+To deregister, submit the simple `deregister` extrinsic when the registration is expired:
+
+![Deregister Extrinsic](./bridge-relayers-deregister.png)
+
+At any time, you can prolong your registration by calling `register` with a larger `validTill`.
+
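+If you prefer to submit the same calls from a script instead of the UI, a minimal `@polkadot/api`
+sketch could look like this (the seed and the `validTill` value are placeholders; pick `validTill`
+according to the rules above):
+
+```ts
+// A sketch: (re)register the relayer account on the Kusama Bridge Hub.
+import { ApiPromise, WsProvider } from '@polkadot/api';
+import { Keyring } from '@polkadot/keyring';
+
+async function register(validTill: number): Promise<void> {
+  const api = await ApiPromise.create({
+    provider: new WsProvider('wss://kusama-bridge-hub-rpc.polkadot.io'),
+  });
+  const relayer = new Keyring({ type: 'sr25519' }).addFromUri('<your seed>');
+
+  // `register(validTill)` reserves the required stake; once the registration is
+  // expired, `api.tx.bridgeRelayers.deregister()` returns the reserved funds.
+  await api.tx.bridgeRelayers.register(validTill).signAndSend(relayer);
+}
+
+register(1_000_000).catch(console.error); // placeholder `validTill` value
+```
+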
+## Costs
+
+Your relayer account (on both Bridge Hubs) must hold enough funds to be able to pay the costs of bridge
+transactions. If your relayer behaves correctly, those costs will be compensated and you will be
+able to claim them later.
+
+**IMPORTANT**: you may add a tip to your bridge transactions to boost their priority. But our
+compensation mechanism never refunds the transaction tip, so all tip tokens will be lost.
+
+<details>
+<summary>Types of bridge transactions</summary>
+
+There are two types of bridge transactions:
+
+- a message delivery transaction brings queued message(s) from one Bridge Hub to another. We record
+  the fact that this specific (your) relayer has delivered those messages;
+
+- a message confirmation transaction confirms that some messages have been delivered and also brings
+  back information on how many messages (your) relayer has delivered. We use this information later
+  to register delivery rewards on the source chain.
+
+Several messages/confirmations may be included in a single bridge transaction. Apart from this
+data, a bridge transaction may include finality and storage proofs, required to prove the authenticity of
+this data.
+
+</details>
+
+To deliver and get a reward for a single message, the relayer needs to submit two transactions: one
+at the source Bridge Hub and one at the target Bridge Hub. Below are the costs for Polkadot <> Kusama
+messages (as of today):
+
+- to deliver a single Polkadot -> Kusama message, you would need to pay around `0.06 KSM` at Kusama
+  Bridge Hub and around `1.62 DOT` at Polkadot Bridge Hub;
+
+- to deliver a single Kusama -> Polkadot message, you would need to pay around `1.70 DOT` at Polkadot
+  Bridge Hub and around `0.05 KSM` at Kusama Bridge Hub.
+
+Those values are not constants - they depend on call weights (which may change from release to release),
+on transaction sizes (which depend on the message size and chain state) and on the congestion factor. In any
+case - it is your duty to make sure that the relayer has enough funds to pay transaction fees.
+
+## Claiming your Compensations and Rewards
+
+Hopefully you have successfully delivered some messages and can now claim your compensation and reward.
+This requires submitting several transactions. But first, let's check that you actually have something to
+claim. For that, let's check the state of the pallet that tracks all rewards.
+
+To check your rewards at the Kusama Bridge Hub, go to the
+[Polkadot JS Apps](https://polkadot.js.org/apps/?rpc=wss%3A%2F%2Fkusama-bridge-hub-rpc.polkadot.io#/chainstate)
+targeting Kusama Bridge Hub, select the `bridgeRelayers` pallet, choose the `relayerRewards` map and
+your relayer account. Then:
+
+- set the `laneId` to `0x00000001`;
+
+- set the `bridgedChainId` to `bhpd`;
+
+- check both variants of the `owner` field: `ThisChain` is used to pay for message delivery transactions
+  and `BridgedChain` is used to pay for message confirmation transactions.
+
+If the check shows that you have some rewards, you can craft the claim transaction with similar parameters.
+For that, go to the `Extrinsics` tab of the
+[Polkadot JS Apps](https://polkadot.js.org/apps/?rpc=wss%3A%2F%2Fkusama-bridge-hub-rpc.polkadot.io#/extrinsics)
+and submit the following transaction (make sure to change `owner` before):
+
+![Claim Rewards Extrinsic](./bridge-relayers-claim-rewards.png)
+
+To claim rewards on Polkadot Bridge Hub you can follow the same process. The only difference is that you
+need to set the value of the `bridgedChainId` to `bhks`.
+
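+Both the check and the claim can also be scripted. Below is a sketch with `@polkadot/api` - note that
+`0x62687064` is just the `bhpd` string as hex, and the exact shape of the parameters struct should be
+verified against the live chain metadata:
+
+```ts
+// A sketch: check and claim relayer rewards on the Kusama Bridge Hub.
+import { ApiPromise, WsProvider } from '@polkadot/api';
+import { Keyring } from '@polkadot/keyring';
+
+async function claimRewards(): Promise<void> {
+  const api = await ApiPromise.create({
+    provider: new WsProvider('wss://kusama-bridge-hub-rpc.polkadot.io'),
+  });
+  const relayer = new Keyring({ type: 'sr25519' }).addFromUri('<your seed>');
+
+  // check both `owner` variants, as described above
+  for (const owner of ['ThisChain', 'BridgedChain']) {
+    const params = { laneId: '0x00000001', bridgedChainId: '0x62687064' /* 'bhpd' */, owner };
+    const reward = await api.query.bridgeRelayers.relayerRewards(relayer.address, params);
+    if (!reward.isEmpty) {
+      console.log(`claiming ${reward.toString()} (owner: ${owner})`);
+      await api.tx.bridgeRelayers.claimRewards(params).signAndSend(relayer);
+    }
+  }
+}
+
+claimRewards().catch(console.error);
+```
+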
+## Starting your Relayer
+
+### Starting your Rococo <> Westend Relayer
+
+You may find the relayer image reference in the
+[Releases](https://github.com/paritytech/parity-bridges-common/releases)
+of this repository. Make sure to check the supported (bundled) versions
+of the release there. For the Rococo <> Westend bridge, normally you may use the
+latest published release. The release notes always contain the docker
+image reference and the source files, required to build the relayer manually.
+
+Once you have the docker image, update the variables (the `<...>` values are placeholders) and run the
+following script:
+```sh
+export DOCKER_IMAGE=<relayer-image-reference>
+
+export ROCOCO_HOST=<rococo-ws-rpc-host>
+export ROCOCO_PORT=<rococo-ws-rpc-port>
+# or set it to '--rococo-secure' if wss is used above
+export ROCOCO_IS_SECURE=
+export BRIDGE_HUB_ROCOCO_HOST=<bridge-hub-rococo-ws-rpc-host>
+export BRIDGE_HUB_ROCOCO_PORT=<bridge-hub-rococo-ws-rpc-port>
+# or set it to '--bridge-hub-rococo-secure' if wss is used above
+export BRIDGE_HUB_ROCOCO_IS_SECURE=
+export BRIDGE_HUB_ROCOCO_KEY_FILE=<path-to-bridge-hub-rococo-account-key-file>
+
+export WESTEND_HOST=<westend-ws-rpc-host>
+export WESTEND_PORT=<westend-ws-rpc-port>
+# or set it to '--westend-secure' if wss is used above
+export WESTEND_IS_SECURE=
+export BRIDGE_HUB_WESTEND_HOST=<bridge-hub-westend-ws-rpc-host>
+export BRIDGE_HUB_WESTEND_PORT=<bridge-hub-westend-ws-rpc-port>
+# or set it to '--bridge-hub-westend-secure' if wss is used above
+export BRIDGE_HUB_WESTEND_IS_SECURE=
+export BRIDGE_HUB_WESTEND_KEY_FILE=<path-to-bridge-hub-westend-account-key-file>
+
+# you can get extended relay logs (e.g. for debugging issues) by passing the `-e RUST_LOG=bridge=trace`
+# argument to the `docker` binary
+docker run \
+    -v $BRIDGE_HUB_ROCOCO_KEY_FILE:/bhr.key \
+    -v $BRIDGE_HUB_WESTEND_KEY_FILE:/bhw.key \
+    $DOCKER_IMAGE \
+    relay-headers-and-messages bridge-hub-rococo-bridge-hub-westend \
+    --rococo-host $ROCOCO_HOST \
+    --rococo-port $ROCOCO_PORT \
+    $ROCOCO_IS_SECURE \
+    --rococo-version-mode Auto \
+    --bridge-hub-rococo-host $BRIDGE_HUB_ROCOCO_HOST \
+    --bridge-hub-rococo-port $BRIDGE_HUB_ROCOCO_PORT \
+    $BRIDGE_HUB_ROCOCO_IS_SECURE \
+    --bridge-hub-rococo-version-mode Auto \
+    --bridge-hub-rococo-signer-file /bhr.key \
+    --bridge-hub-rococo-transactions-mortality 16 \
+    --westend-host $WESTEND_HOST \
+    --westend-port $WESTEND_PORT \
+    $WESTEND_IS_SECURE \
+    --westend-version-mode Auto \
+    --bridge-hub-westend-host $BRIDGE_HUB_WESTEND_HOST \
+    --bridge-hub-westend-port $BRIDGE_HUB_WESTEND_PORT \
+    $BRIDGE_HUB_WESTEND_IS_SECURE \
+    --bridge-hub-westend-version-mode Auto \
+    --bridge-hub-westend-signer-file /bhw.key \
+    --bridge-hub-westend-transactions-mortality 16 \
+    --lane 00000002
+```
+
+### Starting your Polkadot <> Kusama Relayer
+
+*Work in progress, coming soon*
+
+### Watching your relayer state
+
+Our relayer provides some Prometheus metrics that you may convert into some fancy Grafana dashboards
+and alerts. By default, metrics are exposed at port `9616`. To expose the endpoint on localhost, change
+the docker command by adding the following two lines:
+
+```sh
+docker run \
+    ..
+    -p 127.0.0.1:9616:9616 \ # tell Docker to bind container port 9616 to host port 9616
+                             # and listen for connections on the host's localhost interface
+    ..
+    $DOCKER_IMAGE \
+    relay-headers-and-messages bridge-hub-rococo-bridge-hub-westend \
+    --prometheus-host 0.0.0.0 \ # tell the `substrate-relay` binary to accept Prometheus endpoint
+                                # connections from everywhere
+    ..
+```
+
+You can find more info on configuring Prometheus and Grafana in the
+[Monitor your node](https://wiki.polkadot.network/docs/maintain-guides-how-to-monitor-your-node)
+guide from the Polkadot wiki.
+
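+To quickly check from a script that the exposed endpoint is alive, even something as simple as the
+sketch below works (Node.js 18+, which ships a global `fetch`; the metric filter is illustrative -
+check the actual names in your relayer's output):
+
+```ts
+// A sketch: poll the relayer Prometheus endpoint and print a few raw metric lines.
+// Assumes port 9616 is mapped to the host as shown in the `docker run` snippet above.
+async function checkMetrics(): Promise<void> {
+  const response = await fetch('http://127.0.0.1:9616/metrics');
+  if (!response.ok) throw new Error(`metrics endpoint returned ${response.status}`);
+
+  const body = await response.text();
+  for (const line of body.split('\n')) {
+    // print non-comment lines mentioning "best" - a rough view of chain-following progress
+    if (!line.startsWith('#') && line.includes('best')) console.log(line);
+  }
+}
+
+checkMetrics().catch(console.error);
+```
+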
+ +- for Polkadot <> Kusama bridge: *work in progress, coming soon* diff --git a/bridges/modules/grandpa/Cargo.toml b/bridges/modules/grandpa/Cargo.toml index dccd7b3bdca..25c6c4e03d5 100644 --- a/bridges/modules/grandpa/Cargo.toml +++ b/bridges/modules/grandpa/Cargo.toml @@ -15,7 +15,7 @@ workspace = true codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } finality-grandpa = { version = "0.16.2", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.0", default-features = false, features = ["derive"] } # Bridge Dependencies diff --git a/bridges/modules/messages/Cargo.toml b/bridges/modules/messages/Cargo.toml index 173d6f1c164..7d0e1b94959 100644 --- a/bridges/modules/messages/Cargo.toml +++ b/bridges/modules/messages/Cargo.toml @@ -13,7 +13,7 @@ workspace = true codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } log = { workspace = true } num-traits = { version = "0.2", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.0", default-features = false, features = ["derive"] } # Bridge dependencies diff --git a/bridges/modules/parachains/Cargo.toml b/bridges/modules/parachains/Cargo.toml index e454a6f2888..a9dd9beeb1f 100644 --- a/bridges/modules/parachains/Cargo.toml +++ b/bridges/modules/parachains/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.0", default-features = false, features = ["derive"] } # Bridge Dependencies diff --git a/bridges/modules/relayers/Cargo.toml b/bridges/modules/relayers/Cargo.toml index b78da5cbeec..f3de72da771 100644 --- a/bridges/modules/relayers/Cargo.toml +++ b/bridges/modules/relayers/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.0", default-features = false, features = ["derive"] } # Bridge dependencies diff --git a/bridges/modules/xcm-bridge-hub-router/Cargo.toml b/bridges/modules/xcm-bridge-hub-router/Cargo.toml index 20f8ff4407b..98477f2df18 100644 --- a/bridges/modules/xcm-bridge-hub-router/Cargo.toml +++ b/bridges/modules/xcm-bridge-hub-router/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive", "serde"] } +scale-info = { version = "2.11.0", default-features = false, features = ["bit-vec", "derive", "serde"] } # Bridge dependencies diff --git a/bridges/modules/xcm-bridge-hub/Cargo.toml b/bridges/modules/xcm-bridge-hub/Cargo.toml index e10119e8649..d910319d9bf 100644 --- a/bridges/modules/xcm-bridge-hub/Cargo.toml +++ b/bridges/modules/xcm-bridge-hub/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = 
false, features = ["derive"] } +scale-info = { version = "2.11.0", default-features = false, features = ["derive"] } # Bridge Dependencies bp-messages = { path = "../../primitives/messages", default-features = false } diff --git a/bridges/primitives/header-chain/Cargo.toml b/bridges/primitives/header-chain/Cargo.toml index 205b593365e..d96a02efba8 100644 --- a/bridges/primitives/header-chain/Cargo.toml +++ b/bridges/primitives/header-chain/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } finality-grandpa = { version = "0.16.2", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.0", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], workspace = true } # Bridge dependencies diff --git a/bridges/primitives/messages/Cargo.toml b/bridges/primitives/messages/Cargo.toml index 8aa6b4b05e5..9d742e3eded 100644 --- a/bridges/primitives/messages/Cargo.toml +++ b/bridges/primitives/messages/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["bit-vec", "derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive"] } +scale-info = { version = "2.11.0", default-features = false, features = ["bit-vec", "derive"] } serde = { features = ["alloc", "derive"], workspace = true } # Bridge dependencies diff --git a/bridges/primitives/parachains/Cargo.toml b/bridges/primitives/parachains/Cargo.toml index 575f26193eb..3846c563575 100644 --- a/bridges/primitives/parachains/Cargo.toml +++ b/bridges/primitives/parachains/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2" -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.0", default-features = false, features = ["derive"] } # Bridge dependencies diff --git a/bridges/primitives/polkadot-core/Cargo.toml b/bridges/primitives/polkadot-core/Cargo.toml index c0dae684b5f..5ab502569e4 100644 --- a/bridges/primitives/polkadot-core/Cargo.toml +++ b/bridges/primitives/polkadot-core/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] } parity-util-mem = { version = "0.12.0", optional = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.0", default-features = false, features = ["derive"] } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } # Bridge Dependencies diff --git a/bridges/primitives/relayers/Cargo.toml b/bridges/primitives/relayers/Cargo.toml index 3bd6809d278..71d0fbf2ec3 100644 --- a/bridges/primitives/relayers/Cargo.toml +++ b/bridges/primitives/relayers/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["bit-vec", "derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive"] } +scale-info = { version = "2.11.0", default-features = false, features = ["bit-vec", "derive"] } # Bridge 
Dependencies diff --git a/bridges/primitives/runtime/Cargo.toml b/bridges/primitives/runtime/Cargo.toml index 22206fb2c37..2d454d264a1 100644 --- a/bridges/primitives/runtime/Cargo.toml +++ b/bridges/primitives/runtime/Cargo.toml @@ -15,7 +15,7 @@ hash-db = { version = "0.16.0", default-features = false } impl-trait-for-tuples = "0.2.2" log = { workspace = true } num-traits = { version = "0.2", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.0", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], workspace = true } # Substrate Dependencies diff --git a/bridges/primitives/xcm-bridge-hub-router/Cargo.toml b/bridges/primitives/xcm-bridge-hub-router/Cargo.toml index 9297a8603c0..734930f18c4 100644 --- a/bridges/primitives/xcm-bridge-hub-router/Cargo.toml +++ b/bridges/primitives/xcm-bridge-hub-router/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["bit-vec", "derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive"] } +scale-info = { version = "2.11.0", default-features = false, features = ["bit-vec", "derive"] } # Substrate Dependencies sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } diff --git a/bridges/scripts/verify-pallets-build.sh b/bridges/scripts/verify-pallets-build.sh index b96bbf1833b..4eefaa8efa0 100755 --- a/bridges/scripts/verify-pallets-build.sh +++ b/bridges/scripts/verify-pallets-build.sh @@ -68,6 +68,7 @@ rm -rf $BRIDGES_FOLDER/modules/beefy rm -rf $BRIDGES_FOLDER/modules/shift-session-manager rm -rf $BRIDGES_FOLDER/primitives/beefy rm -rf $BRIDGES_FOLDER/relays +rm -rf $BRIDGES_FOLDER/relay-clients rm -rf $BRIDGES_FOLDER/scripts/add_license.sh rm -rf $BRIDGES_FOLDER/scripts/build-containers.sh rm -rf $BRIDGES_FOLDER/scripts/ci-cache.sh @@ -77,6 +78,7 @@ rm -rf $BRIDGES_FOLDER/scripts/regenerate_runtimes.sh rm -rf $BRIDGES_FOLDER/scripts/update-weights.sh rm -rf $BRIDGES_FOLDER/scripts/update-weights-setup.sh rm -rf $BRIDGES_FOLDER/scripts/update_substrate.sh +rm -rf $BRIDGES_FOLDER/substrate-relay rm -rf $BRIDGES_FOLDER/tools rm -f $BRIDGES_FOLDER/.dockerignore rm -f $BRIDGES_FOLDER/local.Dockerfile.dockerignore @@ -89,6 +91,7 @@ rm -f $BRIDGES_FOLDER/local.Dockerfile rm -f $BRIDGES_FOLDER/CODEOWNERS rm -f $BRIDGES_FOLDER/Dockerfile rm -f $BRIDGES_FOLDER/rustfmt.toml +rm -f $BRIDGES_FOLDER/RELEASE.md # let's fix Cargo.toml a bit (it'll be helpful if we are in the bridges repo) if [[ ! -f "Cargo.toml" ]]; then -- GitLab From 002d9260f9a0f844f87eefd0abce8bd95aae351b Mon Sep 17 00:00:00 2001 From: Dcompoze Date: Tue, 26 Mar 2024 13:57:57 +0000 Subject: [PATCH 031/128] Fix spelling mistakes across the whole repository (#3808) **Update:** Pushed additional changes based on the review comments. 
**This pull request fixes various spelling mistakes in this repository.**

Most of the changes are contained in the first **3** commits:

- `Fix spelling mistakes in comments and docs`
- `Fix spelling mistakes in test names`
- `Fix spelling mistakes in error messages, panic messages, logs and tracing`

Other source code spelling mistakes are separated into individual commits for easier reviewing:

- `Fix the spelling of 'authority'`
- `Fix the spelling of 'REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY'`
- `Fix the spelling of 'prev_enqueud_messages'`
- `Fix the spelling of 'endpoint'`
- `Fix the spelling of 'children'`
- `Fix the spelling of 'PenpalSiblingSovereignAccount'`
- `Fix the spelling of 'PenpalSudoAccount'`
- `Fix the spelling of 'insufficient'`
- `Fix the spelling of 'PalletXcmExtrinsicsBenchmark'`
- `Fix the spelling of 'subtracted'`
- `Fix the spelling of 'CandidatePendingAvailability'`
- `Fix the spelling of 'exclusive'`
- `Fix the spelling of 'until'`
- `Fix the spelling of 'discriminator'`
- `Fix the spelling of 'nonexistent'`
- `Fix the spelling of 'subsystem'`
- `Fix the spelling of 'indices'`
- `Fix the spelling of 'committed'`
- `Fix the spelling of 'topology'`
- `Fix the spelling of 'response'`
- `Fix the spelling of 'beneficiary'`
- `Fix the spelling of 'formatted'`
- `Fix the spelling of 'UNKNOWN_PROOF_REQUEST'`
- `Fix the spelling of 'succeeded'`
- `Fix the spelling of 'reopened'`
- `Fix the spelling of 'proposer'`
- `Fix the spelling of 'InstantiationNonce'`
- `Fix the spelling of 'depositor'`
- `Fix the spelling of 'expiration'`
- `Fix the spelling of 'phantom'`
- `Fix the spelling of 'AggregatedKeyValue'`
- `Fix the spelling of 'randomness'`
- `Fix the spelling of 'defendant'`
- `Fix the spelling of 'AquaticMammal'`
- `Fix the spelling of 'transactions'`
- `Fix the spelling of 'PassingTracingSubscriber'`
- `Fix the spelling of 'TxSignaturePayload'`
- `Fix the spelling of 'versioning'`
- `Fix the spelling of 'descendant'`
- `Fix the spelling of 'overridden'`
- `Fix the spelling of 'network'`

Let me know if this structure is adequate.

**Note:** The usage of the words `Merkle`, `Merkelize`, `Merklization`, `Merkelization`, `Merkleization` is somewhat inconsistent but I left it as it is.

~~**Note:** In some places the term `Receival` is used to refer to message reception, IMO `Reception` is the correct word here, but I left it as it is.~~

~~**Note:** In some places the term `Overlayed` is used instead of the more acceptable version `Overlaid` but I also left it as it is.~~

~~**Note:** In some places the term `Applyable` is used instead of the correct version `Applicable` but I also left it as it is.~~

**Note:** Both British and American English spellings, e.g. `judgement` vs `judgment`, `initialise` vs `initialize`, `optimise` vs `optimize` etc., are present in different places, but I suppose that's understandable given the number of contributors.

~~**Note:** There is a spelling mistake in `.github/CODEOWNERS` but it triggers errors in CI when I make changes to it, so I left it as it is.~~ --- .github/CODEOWNERS | 2 +- .github/scripts/common/lib.sh | 2 +- .gitlab/pipeline/build.yml | 2 +- .gitlab/rust-features.sh | 2 +- .../bin/runtime-common/src/messages_api.rs | 2 +- .../src/messages_xcm_extension.rs | 2 +- bridges/bin/runtime-common/src/mock.rs | 2 +- .../runtime-common/src/priority_calculator.rs | 2 +- .../src/refund_relayer_extension.rs | 4 +- bridges/chains/chain-kusama/src/lib.rs | 4 +- .../chains/chain-polkadot-bulletin/src/lib.rs | 8 +- bridges/chains/chain-polkadot/src/lib.rs | 4 +- bridges/chains/chain-rococo/src/lib.rs | 4 +- bridges/chains/chain-westend/src/lib.rs | 4 +- bridges/modules/grandpa/README.md | 2 +- bridges/modules/grandpa/src/call_ext.rs | 8 +- bridges/modules/grandpa/src/lib.rs | 4 +- bridges/modules/grandpa/src/mock.rs | 2 +- bridges/modules/messages/src/inbound_lane.rs | 40 +++++----- bridges/modules/messages/src/lib.rs | 30 ++++---- bridges/modules/messages/src/outbound_lane.rs | 30 ++++---- bridges/modules/parachains/src/mock.rs | 4 +- .../modules/xcm-bridge-hub-router/src/lib.rs | 2 +- bridges/modules/xcm-bridge-hub/Cargo.toml | 2 +- .../header-chain/src/justification/mod.rs | 2 +- .../src/justification/verification/mod.rs | 2 +- bridges/primitives/header-chain/src/lib.rs | 8 +- bridges/primitives/messages/src/lib.rs | 8 +- bridges/primitives/polkadot-core/src/lib.rs | 2 +- bridges/primitives/runtime/src/chain.rs | 6 +- bridges/primitives/runtime/src/lib.rs | 4 +- bridges/primitives/test-utils/src/lib.rs | 2 +- bridges/scripts/verify-pallets-build.sh | 2 +- .../pallets/ethereum-client/src/lib.rs | 2 +- .../pallets/ethereum-client/src/tests.rs | 2 +- .../register_token_with_insufficient_fee.rs | 2 +- .../environments/rococo-westend/rococo.zndsl | 4 +- .../environments/rococo-westend/westend.zndsl | 4 +- .../utils/generate_hex_encoded_call/index.js | 18 ++--- bridges/testing/run-tests.sh | 2 +- .../run.sh | 2 +- .../consensus/common/src/level_monitor.rs | 2 +- cumulus/client/consensus/common/src/tests.rs | 2 +- cumulus/client/pov-recovery/src/lib.rs | 10 +-- .../src/reconnecting_ws_client.rs | 3 +- cumulus/pallets/parachain-system/src/lib.rs | 2 +- .../src/relay_state_snapshot.rs | 2 +- cumulus/pallets/xcmp-queue/src/lib.rs | 2 +- .../assets/asset-hub-rococo/src/genesis.rs | 4 +- .../assets/asset-hub-westend/src/genesis.rs | 4 +- .../parachains/testing/penpal/src/genesis.rs | 6 +- .../parachains/testing/penpal/src/lib.rs | 2 +- .../emulated/common/src/impls.rs | 6 +- .../emulated/common/src/lib.rs | 2 +- .../src/tests/reserve_transfer.rs | 2 +- .../src/tests/reserve_transfer.rs | 6 +- .../bridge-hub-rococo/src/tests/snowbridge.rs | 4 +- .../assets/asset-hub-rococo/src/xcm_config.rs | 2 +- .../asset-hub-westend/src/xcm_config.rs | 2 +- .../assets/asset-hub-westend/tests/tests.rs | 4 +- .../assets/test-utils/src/test_cases.rs | 2 +- .../test-utils/src/test_cases_over_bridge.rs | 2 +- .../runtimes/people/people-rococo/src/lib.rs | 6 +- .../runtimes/people/people-westend/src/lib.rs | 8 +- .../runtimes/testing/penpal/src/xcm_config.rs | 2 +- cumulus/polkadot-parachain/src/command.rs | 2 +- .../storage-weight-reclaim/src/lib.rs | 8 +- cumulus/primitives/timestamp/src/lib.rs | 2 +- cumulus/primitives/utility/src/lib.rs | 25 +++--- cumulus/scripts/scale_encode_genesis/index.js | 8 +- cumulus/scripts/temp_parachain_types.json | 2 +- cumulus/test/runtime/src/lib.rs | 2 +- 
.../test/service/benches/validate_block.rs | 2 +- cumulus/test/service/src/lib.rs | 4 +- docker/dockerfiles/binary_injected.Dockerfile | 2 +- docker/scripts/build-injected.sh | 2 +- docs/sdk/Cargo.toml | 2 +- .../frame_runtime_upgrades_and_migrations.rs | 8 +- polkadot/cli/src/cli.rs | 2 +- polkadot/grafana/README.md | 2 +- .../src/approval_db/v1/tests.rs | 2 +- .../src/approval_db/v2/migration_helpers.rs | 2 +- .../src/approval_db/v2/tests.rs | 2 +- .../src/approval_db/v3/migration_helpers.rs | 4 +- .../src/approval_db/v3/tests.rs | 2 +- .../node/core/approval-voting/src/criteria.rs | 2 +- .../node/core/approval-voting/src/import.rs | 2 +- polkadot/node/core/approval-voting/src/lib.rs | 2 +- .../node/core/approval-voting/src/tests.rs | 24 +++--- .../node/core/approval-voting/src/time.rs | 4 +- polkadot/node/core/av-store/src/lib.rs | 2 +- polkadot/node/core/backing/src/lib.rs | 2 +- .../node/core/candidate-validation/src/lib.rs | 2 +- polkadot/node/core/chain-selection/src/lib.rs | 4 +- .../node/core/chain-selection/src/tests.rs | 4 +- .../core/dispute-coordinator/src/db/v1.rs | 2 +- .../node/core/dispute-coordinator/src/lib.rs | 2 +- .../src/scraping/candidates.rs | 2 +- .../dispute-coordinator/src/scraping/mod.rs | 2 +- .../dispute-coordinator/src/scraping/tests.rs | 4 +- .../core/dispute-coordinator/src/tests.rs | 6 +- .../src/disputes/prioritized_selection/mod.rs | 2 +- polkadot/node/core/pvf-checker/src/tests.rs | 6 +- polkadot/node/core/pvf/common/src/pvf.rs | 4 +- polkadot/node/core/pvf/src/host.rs | 2 +- polkadot/node/core/pvf/src/priority.rs | 2 +- .../node/core/pvf/src/worker_interface.rs | 6 +- polkadot/node/core/pvf/tests/it/main.rs | 4 +- polkadot/node/core/runtime-api/src/tests.rs | 8 +- polkadot/node/jaeger/src/spans.rs | 2 +- polkadot/node/malus/README.md | 2 +- .../network/approval-distribution/src/lib.rs | 16 ++-- .../approval-distribution/src/metrics.rs | 2 +- .../approval-distribution/src/tests.rs | 32 ++++---- .../src/requester/fetch_task/mod.rs | 4 +- .../network/availability-recovery/src/lib.rs | 2 +- .../availability-recovery/src/metrics.rs | 4 +- .../bitfield-distribution/src/metrics.rs | 4 +- .../bitfield-distribution/src/tests.rs | 2 +- .../dispute-distribution/src/sender/mod.rs | 10 +-- .../dispute-distribution/src/tests/mock.rs | 2 +- .../node/network/gossip-support/src/tests.rs | 4 +- .../network/protocol/src/grid_topology.rs | 6 +- polkadot/node/network/protocol/src/lib.rs | 2 +- .../node/network/protocol/src/peer_set.rs | 2 +- .../src/request_response/incoming/mod.rs | 2 +- .../protocol/src/request_response/mod.rs | 2 +- .../protocol/src/request_response/v1.rs | 2 +- .../src/legacy_v1/responder.rs | 2 +- .../src/legacy_v1/tests.rs | 2 +- .../statement-distribution/src/v2/cluster.rs | 4 +- .../statement-distribution/src/v2/mod.rs | 6 +- .../statement-distribution/src/v2/requests.rs | 2 +- polkadot/node/overseer/src/lib.rs | 2 +- polkadot/node/primitives/src/approval.rs | 2 +- polkadot/node/primitives/src/disputes/mod.rs | 4 +- polkadot/node/primitives/src/lib.rs | 2 +- polkadot/node/service/src/overseer.rs | 2 +- .../node/service/src/parachains_db/upgrade.rs | 4 +- .../node/service/src/relay_chain_selection.rs | 4 +- .../grafana/availability-read.json | 6 +- .../src/lib/approval/message_generator.rs | 10 +-- .../subsystem-bench/src/lib/approval/mod.rs | 16 ++-- .../src/lib/approval/test_message.rs | 14 ++-- .../src/lib/availability/mod.rs | 6 +- .../subsystem-bench/src/lib/configuration.rs | 6 +- .../subsystem-bench/src/lib/mock/av_store.rs | 2 +- 
.../subsystem-bench/src/lib/mock/chain_api.rs | 2 +- .../src/lib/mock/network_bridge.rs | 12 +-- .../node/subsystem-bench/src/lib/network.rs | 10 +-- .../node/subsystem-bench/src/lib/utils.rs | 76 +++++++++++++++++++ polkadot/node/subsystem-types/src/messages.rs | 2 +- .../src/inclusion_emulator/mod.rs | 2 +- polkadot/node/subsystem-util/src/lib.rs | 2 +- polkadot/primitives/src/v6/mod.rs | 4 +- polkadot/primitives/src/vstaging/mod.rs | 4 +- polkadot/primitives/test-helpers/src/lib.rs | 2 +- .../implementers-guide/src/disputes-flow.md | 2 +- .../node/approval/approval-distribution.md | 4 +- .../availability/availability-recovery.md | 2 +- .../src/node/disputes/dispute-coordinator.md | 8 +- .../implementers-guide/src/node/overseer.md | 4 +- .../src/node/utility/pvf-prechecker.md | 2 +- .../implementers-guide/src/runtime/README.md | 2 +- .../implementers-guide/src/runtime/hrmp.md | 4 +- .../src/runtime/parainherent.md | 2 +- .../implementers-guide/src/types/approval.md | 4 +- .../implementers-guide/src/types/disputes.md | 2 +- .../src/types/overseer-protocol.md | 2 +- polkadot/roadmap/phase-1.toml | 2 +- polkadot/runtime/common/src/xcm_sender.rs | 2 +- .../parachains/src/assigner_coretime/tests.rs | 2 +- .../parachains/src/assigner_on_demand/mod.rs | 4 +- .../parachains/src/coretime/migration.rs | 2 +- polkadot/runtime/parachains/src/disputes.rs | 2 +- .../parachains/src/disputes/slashing.rs | 2 +- polkadot/runtime/parachains/src/hrmp.rs | 6 +- polkadot/runtime/parachains/src/hrmp/tests.rs | 2 +- polkadot/runtime/parachains/src/paras/mod.rs | 2 +- .../runtime/parachains/src/paras/tests.rs | 4 +- .../parachains/src/paras_inherent/mod.rs | 4 +- .../parachains/src/scheduler/migration.rs | 2 +- .../runtime/parachains/src/scheduler/tests.rs | 2 +- .../runtime/parachains/src/session_info.rs | 2 +- polkadot/runtime/parachains/src/ump_tests.rs | 4 +- polkadot/runtime/rococo/README.md | 2 +- .../runtime/westend/src/weights/xcm/mod.rs | 2 +- polkadot/tests/common.rs | 4 +- .../tests/running_the_node_and_interrupt.rs | 2 +- .../pallet-xcm/src/tests/assets_transfer.rs | 4 +- polkadot/xcm/src/v2/multilocation.rs | 2 +- polkadot/xcm/src/v3/multilocation.rs | 2 +- polkadot/xcm/src/v4/location.rs | 2 +- polkadot/xcm/xcm-builder/src/barriers.rs | 2 +- polkadot/xcm/xcm-builder/src/tests/origins.rs | 4 +- polkadot/xcm/xcm-builder/tests/scenarios.rs | 2 +- .../xcm-executor/integration-tests/src/lib.rs | 2 +- .../xcm-executor/src/traits/should_execute.rs | 2 +- polkadot/xcm/xcm-simulator/example/src/lib.rs | 4 +- prdoc/pr_3808.prdoc | 20 +++++ scripts/bridges_update_subtree.sh | 2 +- scripts/snowbridge_update_subtree.sh | 2 +- .../bin/node/cli/benches/block_production.rs | 4 +- substrate/bin/node/cli/src/cli.rs | 2 +- substrate/bin/node/cli/src/service.rs | 2 +- substrate/bin/node/cli/tests/fees.rs | 2 +- substrate/bin/node/testing/src/bench.rs | 6 +- .../bin/utils/chain-spec-builder/src/lib.rs | 2 +- substrate/bin/utils/subkey/README.md | 4 +- substrate/bin/utils/subkey/src/lib.rs | 10 +-- .../basic-authorship/src/basic_authorship.rs | 2 +- substrate/client/chain-spec/src/chain_spec.rs | 10 +-- substrate/client/chain-spec/src/extension.rs | 2 +- .../chain-spec/src/genesis_config_builder.rs | 2 +- substrate/client/chain-spec/src/lib.rs | 2 +- substrate/client/cli/src/commands/vanity.rs | 4 +- .../client/cli/src/params/network_params.rs | 2 +- substrate/client/consensus/aura/src/lib.rs | 2 +- .../client/consensus/babe/src/authorship.rs | 2 +- substrate/client/consensus/babe/src/lib.rs | 2 +- 
.../client/consensus/babe/src/migration.rs | 2 +- substrate/client/consensus/beefy/README.md | 2 +- .../consensus/beefy/src/communication/mod.rs | 2 +- .../incoming_requests_handler.rs | 2 +- .../client/consensus/beefy/src/keystore.rs | 8 +- .../client/consensus/beefy/src/worker.rs | 6 +- substrate/client/consensus/common/Cargo.toml | 2 +- .../consensus/common/src/import_queue.rs | 2 +- .../common/src/import_queue/basic_queue.rs | 4 +- substrate/client/consensus/epochs/src/lib.rs | 4 +- .../grandpa/src/communication/mod.rs | 6 +- .../grandpa/src/communication/periodic.rs | 2 +- .../client/consensus/grandpa/src/tests.rs | 4 +- .../consensus/grandpa/src/until_imported.rs | 2 +- .../consensus/grandpa/src/voting_rule.rs | 2 +- .../consensus/grandpa/src/warp_proof.rs | 2 +- .../manual-seal/src/consensus/babe.rs | 2 +- substrate/client/consensus/slots/src/lib.rs | 6 +- substrate/client/db/src/upgrade.rs | 2 +- substrate/client/db/src/utils.rs | 2 +- substrate/client/executor/common/src/error.rs | 2 +- .../client/executor/wasmtime/src/runtime.rs | 2 +- .../client/executor/wasmtime/src/tests.rs | 4 +- substrate/client/network/bitswap/src/lib.rs | 2 +- .../src/protocol/notifications/behaviour.rs | 2 +- .../src/protocol/notifications/handler.rs | 2 +- .../src/protocol/notifications/service/mod.rs | 6 +- .../protocol/notifications/service/tests.rs | 2 +- .../notifications/upgrade/notifications.rs | 4 +- .../client/network/src/protocol_controller.rs | 4 +- substrate/client/network/sync/src/engine.rs | 6 +- substrate/client/network/sync/src/strategy.rs | 12 +-- .../network/sync/src/strategy/chain_sync.rs | 4 +- .../sync/src/strategy/chain_sync/test.rs | 8 +- .../client/network/sync/src/strategy/state.rs | 16 ++-- .../client/network/sync/src/strategy/warp.rs | 2 +- .../network/sync/src/warp_request_handler.rs | 2 +- substrate/client/network/test/src/sync.rs | 2 +- substrate/client/offchain/src/api/http.rs | 2 +- .../rpc-servers/src/middleware/metrics.rs | 2 +- .../client/rpc-spec-v2/src/archive/tests.rs | 4 +- .../rpc-spec-v2/src/chain_head/event.rs | 4 +- .../rpc-spec-v2/src/chain_head/tests.rs | 6 +- .../tests/transaction_broadcast_tests.rs | 6 +- .../src/transaction/transaction_broadcast.rs | 4 +- substrate/client/rpc/src/statement/mod.rs | 2 +- .../service/src/chain_ops/import_blocks.rs | 2 +- substrate/client/service/src/config.rs | 2 +- substrate/client/state-db/src/lib.rs | 4 +- substrate/client/state-db/src/noncanonical.rs | 10 +-- substrate/client/telemetry/src/lib.rs | 2 +- substrate/client/transaction-pool/README.md | 2 +- .../client/transaction-pool/tests/pool.rs | 2 +- substrate/client/utils/src/notification.rs | 2 +- substrate/frame/alliance/README.md | 2 +- substrate/frame/alliance/src/lib.rs | 6 +- substrate/frame/alliance/src/tests.rs | 2 +- substrate/frame/asset-conversion/src/lib.rs | 2 +- substrate/frame/asset-rate/src/lib.rs | 2 +- substrate/frame/assets/src/lib.rs | 2 +- substrate/frame/assets/src/tests.rs | 2 +- substrate/frame/babe/src/benchmarking.rs | 2 +- substrate/frame/bags-list/src/list/tests.rs | 2 +- substrate/frame/balances/src/lib.rs | 6 +- .../balances/src/tests/currency_tests.rs | 4 +- .../balances/src/tests/reentrancy_tests.rs | 4 +- substrate/frame/benchmarking/src/analysis.rs | 2 +- substrate/frame/benchmarking/src/lib.rs | 2 +- substrate/frame/benchmarking/src/v1.rs | 6 +- substrate/frame/broker/src/lib.rs | 2 +- substrate/frame/broker/src/types.rs | 4 +- substrate/frame/collective/README.md | 2 +- substrate/frame/collective/src/lib.rs | 2 +- 
substrate/frame/collective/src/tests.rs | 2 +- substrate/frame/contracts/README.md | 2 +- .../fixtures/contracts/multi_store.rs | 2 +- .../frame/contracts/src/benchmarking/code.rs | 2 +- .../frame/contracts/src/benchmarking/mod.rs | 8 +- substrate/frame/contracts/src/exec.rs | 4 +- substrate/frame/contracts/src/gas.rs | 2 +- substrate/frame/contracts/src/lib.rs | 2 +- .../frame/contracts/src/migration/v09.rs | 2 +- substrate/frame/contracts/src/schedule.rs | 2 +- substrate/frame/contracts/src/tests.rs | 18 ++--- substrate/frame/contracts/src/wasm/mod.rs | 2 +- substrate/frame/contracts/src/wasm/runtime.rs | 6 +- .../frame/conviction-voting/src/tests.rs | 2 +- .../core-fellowship/src/tests/integration.rs | 2 +- substrate/frame/democracy/src/lib.rs | 2 +- .../unlock_and_unreserve_all_funds.rs | 42 +++++----- substrate/frame/democracy/src/tests.rs | 2 +- .../frame/democracy/src/tests/cancellation.rs | 2 +- .../src/helpers.rs | 4 +- .../election-provider-multi-phase/src/lib.rs | 4 +- .../src/unsigned.rs | 4 +- .../test-staking-e2e/src/lib.rs | 10 +-- .../test-staking-e2e/src/mock.rs | 4 +- .../election-provider-support/src/bounds.rs | 4 +- .../frame/examples/offchain-worker/src/lib.rs | 2 +- .../single-block-migrations/src/lib.rs | 4 +- substrate/frame/examples/split/Cargo.toml | 2 +- substrate/frame/examples/tasks/Cargo.toml | 2 +- substrate/frame/fast-unstake/src/tests.rs | 2 +- substrate/frame/grandpa/src/benchmarking.rs | 2 +- substrate/frame/grandpa/src/tests.rs | 2 +- substrate/frame/identity/src/benchmarking.rs | 4 +- substrate/frame/identity/src/lib.rs | 2 +- substrate/frame/im-online/src/tests.rs | 2 +- .../frame/merkle-mountain-range/src/lib.rs | 2 +- substrate/frame/message-queue/src/lib.rs | 2 +- substrate/frame/message-queue/src/tests.rs | 2 +- .../frame/migrations/src/mock_helpers.rs | 2 +- substrate/frame/nis/README.md | 2 +- substrate/frame/nis/src/lib.rs | 4 +- substrate/frame/nis/src/tests.rs | 2 +- substrate/frame/node-authorization/src/lib.rs | 10 +-- substrate/frame/nomination-pools/src/lib.rs | 8 +- .../frame/nomination-pools/src/migration.rs | 2 +- substrate/frame/offences/src/tests.rs | 2 +- substrate/frame/paged-list/src/paged_list.rs | 2 +- substrate/frame/parameters/src/lib.rs | 34 ++++----- substrate/frame/parameters/src/tests/unit.rs | 2 +- substrate/frame/proxy/src/lib.rs | 2 +- substrate/frame/root-testing/src/lib.rs | 2 +- .../frame/salary/src/tests/integration.rs | 2 +- substrate/frame/salary/src/tests/unit.rs | 8 +- substrate/frame/sassafras/src/benchmarking.rs | 2 +- substrate/frame/sassafras/src/lib.rs | 8 +- substrate/frame/scheduler/README.md | 2 +- substrate/frame/scheduler/src/benchmarking.rs | 4 +- substrate/frame/scheduler/src/lib.rs | 2 +- substrate/frame/scheduler/src/tests.rs | 4 +- substrate/frame/society/src/lib.rs | 8 +- substrate/frame/society/src/migrations.rs | 8 +- substrate/frame/society/src/tests.rs | 2 +- substrate/frame/staking/src/ledger.rs | 2 +- substrate/frame/staking/src/migrations.rs | 2 +- substrate/frame/staking/src/pallet/impls.rs | 4 +- substrate/frame/staking/src/pallet/mod.rs | 8 +- substrate/frame/staking/src/tests.rs | 10 +-- substrate/frame/sudo/src/lib.rs | 2 +- .../frame/support/procedural/src/benchmark.rs | 4 +- .../procedural/src/construct_runtime/mod.rs | 6 +- .../procedural/src/construct_runtime/parse.rs | 10 +-- .../support/procedural/src/derive_impl.rs | 2 +- .../support/procedural/src/dynamic_params.rs | 18 ++--- substrate/frame/support/procedural/src/lib.rs | 6 +- .../procedural/src/pallet/parse/mod.rs | 2 +- 
.../frame/support/src/dispatch_context.rs | 4 +- substrate/frame/support/src/instances.rs | 32 ++++---- substrate/frame/support/src/lib.rs | 12 +-- .../frame/support/src/storage/migration.rs | 8 +- substrate/frame/support/src/storage/mod.rs | 21 ++--- .../frame/support/src/storage/stream_iter.rs | 2 +- .../support/src/storage/types/counted_map.rs | 10 +-- .../support/src/storage/types/counted_nmap.rs | 2 +- .../support/src/storage/types/double_map.rs | 5 +- .../frame/support/src/storage/types/map.rs | 5 +- .../frame/support/src/storage/types/mod.rs | 2 +- .../frame/support/src/storage/types/nmap.rs | 2 +- .../frame/support/src/storage/types/value.rs | 5 +- .../frame/support/src/traits/dispatch.rs | 2 +- .../support/src/traits/dynamic_params.rs | 40 +++++----- substrate/frame/support/src/traits/hooks.rs | 2 +- substrate/frame/support/src/traits/misc.rs | 2 +- .../conformance_tests/regular/unbalanced.rs | 2 +- .../traits/tokens/imbalance/split_two_ways.rs | 2 +- .../frame/support/src/traits/tokens/misc.rs | 2 +- .../derive_impl_ui/attached_to_non_impl.rs | 2 +- .../derive_impl_ui/bad_default_impl_path.rs | 4 +- .../derive_impl_ui/bad_disambiguation_path.rs | 4 +- .../missing_disambiguation_path.rs | 4 +- .../derive_impl_ui/pass/basic_overriding.rs | 12 +-- .../test/tests/pallet_outer_enums_explicit.rs | 2 +- .../test/tests/pallet_outer_enums_implicit.rs | 6 +- .../pallet_ui/pass/inherited_call_weight3.rs | 2 +- substrate/frame/system/src/limits.rs | 2 +- substrate/frame/system/src/offchain.rs | 6 +- substrate/frame/tips/src/migrations/mod.rs | 2 +- .../frame/transaction-payment/src/tests.rs | 2 +- substrate/frame/transaction-storage/README.md | 2 +- .../frame/transaction-storage/src/lib.rs | 2 +- .../frame/transaction-storage/src/tests.rs | 6 +- substrate/frame/treasury/src/lib.rs | 6 +- substrate/frame/uniques/src/tests.rs | 4 +- substrate/frame/utility/README.md | 2 +- substrate/frame/vesting/src/migrations.rs | 2 +- .../arithmetic/fuzzer/src/fixed_point.rs | 2 +- .../primitives/arithmetic/src/fixed_point.rs | 2 +- .../primitives/blockchain/src/backend.rs | 2 +- substrate/primitives/blockchain/src/error.rs | 2 +- .../blockchain/src/header_metadata.rs | 4 +- .../primitives/consensus/babe/src/lib.rs | 4 +- .../primitives/consensus/beefy/src/lib.rs | 2 +- .../primitives/consensus/beefy/src/mmr.rs | 4 +- .../primitives/consensus/beefy/src/payload.rs | 2 +- .../primitives/consensus/common/src/lib.rs | 2 +- .../consensus/sassafras/src/ticket.rs | 2 +- .../primitives/consensus/sassafras/src/vrf.rs | 2 +- substrate/primitives/core/src/address_uri.rs | 2 +- substrate/primitives/core/src/bandersnatch.rs | 2 +- .../primitives/core/src/paired_crypto.rs | 2 +- .../primitives/inherents/src/client_side.rs | 2 +- substrate/primitives/io/Cargo.toml | 2 +- substrate/primitives/io/src/lib.rs | 8 +- .../maybe-compressed-blob/Cargo.toml | 2 +- substrate/primitives/metadata-ir/src/types.rs | 2 +- .../primitives/npos-elections/src/pjr.rs | 2 +- .../primitives/npos-elections/src/reduce.rs | 2 +- .../primitives/runtime-interface/src/lib.rs | 2 +- .../test-wasm-deprecated/src/lib.rs | 12 +-- .../runtime-interface/test-wasm/src/lib.rs | 22 +++--- .../runtime-interface/test/src/lib.rs | 10 +-- .../src/generic/unchecked_extrinsic.rs | 2 +- .../runtime/src/offchain/storage_lock.rs | 2 +- substrate/primitives/runtime/src/testing.rs | 8 +- substrate/primitives/runtime/src/traits.rs | 4 +- substrate/primitives/staking/src/offence.rs | 2 +- substrate/primitives/state-machine/src/ext.rs | 4 +- 
substrate/primitives/state-machine/src/lib.rs | 12 +-- .../src/overlayed_changes/changeset.rs | 14 ++-- .../src/overlayed_changes/mod.rs | 8 +- .../primitives/state-machine/src/testing.rs | 2 +- .../state-machine/src/trie_backend_essence.rs | 8 +- substrate/primitives/storage/src/lib.rs | 2 +- substrate/primitives/tracing/src/lib.rs | 8 +- substrate/primitives/tracing/src/types.rs | 10 +-- .../transaction-storage-proof/src/lib.rs | 2 +- substrate/primitives/trie/src/cache/mod.rs | 2 +- substrate/primitives/trie/src/lib.rs | 4 +- .../proc-macro/src/decl_runtime_version.rs | 2 +- substrate/primitives/version/src/lib.rs | 2 +- substrate/utils/fork-tree/src/lib.rs | 6 +- .../benchmarking-cli/src/pallet/command.rs | 6 +- .../utils/frame/generate-bags/src/lib.rs | 2 +- .../frame/remote-externalities/src/lib.rs | 2 +- .../rpc/state-trie-migration-rpc/src/lib.rs | 2 +- substrate/utils/substrate-bip39/README.md | 8 +- substrate/utils/substrate-bip39/src/lib.rs | 2 +- substrate/utils/wasm-builder/src/lib.rs | 2 +- .../utils/wasm-builder/src/wasm_project.rs | 14 ++-- templates/minimal/node/Cargo.toml | 2 +- templates/minimal/runtime/Cargo.toml | 2 +- 463 files changed, 1119 insertions(+), 1017 deletions(-) create mode 100644 polkadot/node/subsystem-bench/src/lib/utils.rs create mode 100644 prdoc/pr_3808.prdoc diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index fdaa0c8628f..4fc5b97caae 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -13,7 +13,7 @@ # - Multiple owners are supported. # - Either handle (e.g, @github_user or @github/team) or email can be used. Keep in mind, # that handles might work better because they are more recognizable on GitHub, -# eyou can use them for mentioning unlike an email. +# you can use them for mentioning unlike an email. # - The latest matching rule, if multiple, takes precedence. # CI diff --git a/.github/scripts/common/lib.sh b/.github/scripts/common/lib.sh index 29dc269ffd2..932a6d546c3 100755 --- a/.github/scripts/common/lib.sh +++ b/.github/scripts/common/lib.sh @@ -237,7 +237,7 @@ fetch_release_artifacts() { popd > /dev/null } -# Fetch the release artifacts like binary and sigantures from S3. Assumes the ENV are set: +# Fetch the release artifacts like binary and signatures from S3. Assumes the ENV are set: # - RELEASE_ID # - GITHUB_TOKEN # - REPO in the form paritytech/polkadot diff --git a/.gitlab/pipeline/build.yml b/.gitlab/pipeline/build.yml index f8de6135572..44d66eb2f5e 100644 --- a/.gitlab/pipeline/build.yml +++ b/.gitlab/pipeline/build.yml @@ -350,7 +350,7 @@ build-runtimes-polkavm: - .run-immediately # - .collect-artifact variables: - # this variable gets overriden by "rusty-cachier environment inject", use the value as default + # this variable gets overridden by "rusty-cachier environment inject", use the value as default CARGO_TARGET_DIR: "$CI_PROJECT_DIR/target" before_script: - mkdir -p ./artifacts/subkey diff --git a/.gitlab/rust-features.sh b/.gitlab/rust-features.sh index c0ac192a6ec..c3ec61ab871 100755 --- a/.gitlab/rust-features.sh +++ b/.gitlab/rust-features.sh @@ -15,7 +15,7 @@ # # The steps of this script: # 1. Check that all required dependencies are installed. -# 2. Check that all rules are fullfilled for the whole workspace. If not: +# 2. Check that all rules are fulfilled for the whole workspace. If not: # 4. Check all crates to find the offending ones. # 5. Print all offending crates and exit with code 1. 
# diff --git a/bridges/bin/runtime-common/src/messages_api.rs b/bridges/bin/runtime-common/src/messages_api.rs index ccf1c754041..7fbdeb36612 100644 --- a/bridges/bin/runtime-common/src/messages_api.rs +++ b/bridges/bin/runtime-common/src/messages_api.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! Helpers for implementing various message-related runtime API mthods. +//! Helpers for implementing various message-related runtime API methods. use bp_messages::{ InboundMessageDetails, LaneId, MessageNonce, MessagePayload, OutboundMessageDetails, diff --git a/bridges/bin/runtime-common/src/messages_xcm_extension.rs b/bridges/bin/runtime-common/src/messages_xcm_extension.rs index e3da6155f08..46ed4da0d85 100644 --- a/bridges/bin/runtime-common/src/messages_xcm_extension.rs +++ b/bridges/bin/runtime-common/src/messages_xcm_extension.rs @@ -248,7 +248,7 @@ impl LocalXcmQueueManager { sender_and_lane: &SenderAndLane, enqueued_messages: MessageNonce, ) { - // skip if we dont want to handle congestion + // skip if we don't want to handle congestion if !H::supports_congestion_detection() { return } diff --git a/bridges/bin/runtime-common/src/mock.rs b/bridges/bin/runtime-common/src/mock.rs index deee4524e85..8c4cb2233e1 100644 --- a/bridges/bin/runtime-common/src/mock.rs +++ b/bridges/bin/runtime-common/src/mock.rs @@ -379,7 +379,7 @@ impl Chain for BridgedUnderlyingChain { impl ChainWithGrandpa for BridgedUnderlyingChain { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = ""; const MAX_AUTHORITIES_COUNT: u32 = 16; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 8; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = 8; const MAX_MANDATORY_HEADER_SIZE: u32 = 256; const AVERAGE_HEADER_SIZE: u32 = 64; } diff --git a/bridges/bin/runtime-common/src/priority_calculator.rs b/bridges/bin/runtime-common/src/priority_calculator.rs index c2737128e34..5035553f508 100644 --- a/bridges/bin/runtime-common/src/priority_calculator.rs +++ b/bridges/bin/runtime-common/src/priority_calculator.rs @@ -163,7 +163,7 @@ mod integrity_tests { { // just an estimation of extra transaction bytes that are added to every transaction // (including signature, signed extensions extra and etc + in our case it includes - // all call arguments extept the proof itself) + // all call arguments except the proof itself) let base_tx_size = 512; // let's say we are relaying similar small messages and for every message we add more trie // nodes to the proof (x0.5 because we expect some nodes to be reused) diff --git a/bridges/bin/runtime-common/src/refund_relayer_extension.rs b/bridges/bin/runtime-common/src/refund_relayer_extension.rs index 8e901d72821..455392a0a27 100644 --- a/bridges/bin/runtime-common/src/refund_relayer_extension.rs +++ b/bridges/bin/runtime-common/src/refund_relayer_extension.rs @@ -1538,7 +1538,7 @@ mod tests { } #[test] - fn validate_boosts_priority_of_message_delivery_transactons() { + fn validate_boosts_priority_of_message_delivery_transactions() { run_test(|| { initialize_environment(100, 100, 100); @@ -1568,7 +1568,7 @@ mod tests { } #[test] - fn validate_does_not_boost_priority_of_message_delivery_transactons_with_too_many_messages() { + fn validate_does_not_boost_priority_of_message_delivery_transactions_with_too_many_messages() { run_test(|| { initialize_environment(100, 100, 100); diff --git a/bridges/chains/chain-kusama/src/lib.rs b/bridges/chains/chain-kusama/src/lib.rs index 
e3b4d0520f6..a81004afe81 100644 --- a/bridges/chains/chain-kusama/src/lib.rs +++ b/bridges/chains/chain-kusama/src/lib.rs @@ -53,8 +53,8 @@ impl Chain for Kusama { impl ChainWithGrandpa for Kusama { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = WITH_KUSAMA_GRANDPA_PALLET_NAME; const MAX_AUTHORITIES_COUNT: u32 = MAX_AUTHORITIES_COUNT; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = - REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = + REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY; const MAX_MANDATORY_HEADER_SIZE: u32 = MAX_MANDATORY_HEADER_SIZE; const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } diff --git a/bridges/chains/chain-polkadot-bulletin/src/lib.rs b/bridges/chains/chain-polkadot-bulletin/src/lib.rs index f2eebf93124..f3d300567f2 100644 --- a/bridges/chains/chain-polkadot-bulletin/src/lib.rs +++ b/bridges/chains/chain-polkadot-bulletin/src/lib.rs @@ -43,7 +43,7 @@ use sp_runtime::{traits::DispatchInfoOf, transaction_validity::TransactionValidi pub use bp_polkadot_core::{ AccountAddress, AccountId, Balance, Block, BlockNumber, Hash, Hasher, Header, Nonce, Signature, SignedBlock, UncheckedExtrinsic, AVERAGE_HEADER_SIZE, EXTRA_STORAGE_PROOF_SIZE, - MAX_MANDATORY_HEADER_SIZE, REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY, + MAX_MANDATORY_HEADER_SIZE, REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY, }; /// Maximal number of GRANDPA authorities at Polkadot Bulletin chain. @@ -62,7 +62,7 @@ const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(90); // Re following constants - we are using the same values at Cumulus parachains. They are limited // by the maximal transaction weight/size. Since block limits at Bulletin Chain are larger than -// at the Cumulus Bridgeg Hubs, we could reuse the same values. +// at the Cumulus Bridge Hubs, we could reuse the same values. /// Maximal number of unrewarded relayer entries at inbound lane for Cumulus-based parachains. 
pub const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 1024; @@ -207,8 +207,8 @@ impl Chain for PolkadotBulletin { impl ChainWithGrandpa for PolkadotBulletin { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = WITH_POLKADOT_BULLETIN_GRANDPA_PALLET_NAME; const MAX_AUTHORITIES_COUNT: u32 = MAX_AUTHORITIES_COUNT; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = - REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = + REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY; const MAX_MANDATORY_HEADER_SIZE: u32 = MAX_MANDATORY_HEADER_SIZE; const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } diff --git a/bridges/chains/chain-polkadot/src/lib.rs b/bridges/chains/chain-polkadot/src/lib.rs index fc5e10308a8..00d35783a9b 100644 --- a/bridges/chains/chain-polkadot/src/lib.rs +++ b/bridges/chains/chain-polkadot/src/lib.rs @@ -55,8 +55,8 @@ impl Chain for Polkadot { impl ChainWithGrandpa for Polkadot { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = WITH_POLKADOT_GRANDPA_PALLET_NAME; const MAX_AUTHORITIES_COUNT: u32 = MAX_AUTHORITIES_COUNT; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = - REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = + REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY; const MAX_MANDATORY_HEADER_SIZE: u32 = MAX_MANDATORY_HEADER_SIZE; const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } diff --git a/bridges/chains/chain-rococo/src/lib.rs b/bridges/chains/chain-rococo/src/lib.rs index f1b256f0f09..2385dd2cbb2 100644 --- a/bridges/chains/chain-rococo/src/lib.rs +++ b/bridges/chains/chain-rococo/src/lib.rs @@ -53,8 +53,8 @@ impl Chain for Rococo { impl ChainWithGrandpa for Rococo { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = WITH_ROCOCO_GRANDPA_PALLET_NAME; const MAX_AUTHORITIES_COUNT: u32 = MAX_AUTHORITIES_COUNT; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = - REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = + REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY; const MAX_MANDATORY_HEADER_SIZE: u32 = MAX_MANDATORY_HEADER_SIZE; const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } diff --git a/bridges/chains/chain-westend/src/lib.rs b/bridges/chains/chain-westend/src/lib.rs index f03fd2160a7..b344b7f4bf9 100644 --- a/bridges/chains/chain-westend/src/lib.rs +++ b/bridges/chains/chain-westend/src/lib.rs @@ -53,8 +53,8 @@ impl Chain for Westend { impl ChainWithGrandpa for Westend { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = WITH_WESTEND_GRANDPA_PALLET_NAME; const MAX_AUTHORITIES_COUNT: u32 = MAX_AUTHORITIES_COUNT; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = - REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = + REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY; const MAX_MANDATORY_HEADER_SIZE: u32 = MAX_MANDATORY_HEADER_SIZE; const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } diff --git a/bridges/modules/grandpa/README.md b/bridges/modules/grandpa/README.md index 992bd2cc472..4a3099b8afc 100644 --- a/bridges/modules/grandpa/README.md +++ b/bridges/modules/grandpa/README.md @@ -10,7 +10,7 @@ It is used by the parachains light client (bridge parachains pallet) and by mess ## A Brief Introduction into GRANDPA Finality You can find detailed information on GRANDPA, by exploring its [repository](https://github.com/paritytech/finality-grandpa). 
-Here is the minimal reqiuired GRANDPA information to understand how pallet works. +Here is the minimal required GRANDPA information to understand how pallet works. Any Substrate chain may use different block authorship algorithms (like BABE or Aura) to determine block producers and generate blocks. This has nothing common with finality, though - the task of block authorship is to coordinate diff --git a/bridges/modules/grandpa/src/call_ext.rs b/bridges/modules/grandpa/src/call_ext.rs index e3c778b480b..4a7ebb3cc8d 100644 --- a/bridges/modules/grandpa/src/call_ext.rs +++ b/bridges/modules/grandpa/src/call_ext.rs @@ -205,7 +205,7 @@ pub(crate) fn submit_finality_proof_info_from_args, I: 'static>( // as an extra weight. let votes_ancestries_len = justification.votes_ancestries.len().saturated_into(); let extra_weight = - if votes_ancestries_len > T::BridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY { + if votes_ancestries_len > T::BridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY { T::WeightInfo::submit_finality_proof(precommits_len, votes_ancestries_len) } else { Weight::zero() @@ -396,11 +396,11 @@ mod tests { let finality_target = test_header(1); let mut justification_params = JustificationGeneratorParams { header: finality_target.clone(), - ancestors: TestBridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY, + ancestors: TestBridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY, ..Default::default() }; - // when there are `REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY` headers => no refund + // when there are `REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY` headers => no refund let justification = make_justification_for_header(justification_params.clone()); let call = RuntimeCall::Grandpa(crate::Call::submit_finality_proof_ex { finality_target: Box::new(finality_target.clone()), @@ -409,7 +409,7 @@ mod tests { }); assert_eq!(call.submit_finality_proof_info().unwrap().extra_weight, Weight::zero()); - // when there are `REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY + 1` headers => full refund + // when there are `REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY + 1` headers => full refund justification_params.ancestors += 1; let justification = make_justification_for_header(justification_params); let call_weight = ::WeightInfo::submit_finality_proof( diff --git a/bridges/modules/grandpa/src/lib.rs b/bridges/modules/grandpa/src/lib.rs index ce2c47da954..9e095651ef8 100644 --- a/bridges/modules/grandpa/src/lib.rs +++ b/bridges/modules/grandpa/src/lib.rs @@ -935,7 +935,7 @@ mod tests { } #[test] - fn succesfully_imports_header_with_valid_finality() { + fn successfully_imports_header_with_valid_finality() { run_test(|| { initialize_substrate_bridge(); @@ -1192,7 +1192,7 @@ mod tests { header.digest = change_log(0); let justification = make_justification_for_header(JustificationGeneratorParams { header: header.clone(), - ancestors: TestBridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY + 1, + ancestors: TestBridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY + 1, ..Default::default() }); diff --git a/bridges/modules/grandpa/src/mock.rs b/bridges/modules/grandpa/src/mock.rs index 4318d663a2e..e689e520c92 100644 --- a/bridges/modules/grandpa/src/mock.rs +++ b/bridges/modules/grandpa/src/mock.rs @@ -87,7 +87,7 @@ impl Chain for TestBridgedChain { impl ChainWithGrandpa for TestBridgedChain { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = ""; const MAX_AUTHORITIES_COUNT: u32 = MAX_BRIDGED_AUTHORITIES; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 8; 
+ const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = 8; const MAX_MANDATORY_HEADER_SIZE: u32 = 256; const AVERAGE_HEADER_SIZE: u32 = 64; } diff --git a/bridges/modules/messages/src/inbound_lane.rs b/bridges/modules/messages/src/inbound_lane.rs index 966ec939e70..da1698e6e03 100644 --- a/bridges/modules/messages/src/inbound_lane.rs +++ b/bridges/modules/messages/src/inbound_lane.rs @@ -21,7 +21,7 @@ use crate::Config; use bp_messages::{ target_chain::{DispatchMessage, DispatchMessageData, MessageDispatch}, DeliveredMessages, InboundLaneData, LaneId, MessageKey, MessageNonce, OutboundLaneData, - ReceivalResult, UnrewardedRelayer, + ReceptionResult, UnrewardedRelayer, }; use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; use frame_support::traits::Get; @@ -170,21 +170,21 @@ impl InboundLane { relayer_at_bridged_chain: &S::Relayer, nonce: MessageNonce, message_data: DispatchMessageData, - ) -> ReceivalResult { + ) -> ReceptionResult { let mut data = self.storage.get_or_init_data(); if Some(nonce) != data.last_delivered_nonce().checked_add(1) { - return ReceivalResult::InvalidNonce + return ReceptionResult::InvalidNonce } // if there are more unrewarded relayer entries than we may accept, reject this message if data.relayers.len() as MessageNonce >= self.storage.max_unrewarded_relayer_entries() { - return ReceivalResult::TooManyUnrewardedRelayers + return ReceptionResult::TooManyUnrewardedRelayers } // if there are more unconfirmed messages than we may accept, reject this message let unconfirmed_messages_count = nonce.saturating_sub(data.last_confirmed_nonce); if unconfirmed_messages_count > self.storage.max_unconfirmed_messages() { - return ReceivalResult::TooManyUnconfirmedMessages + return ReceptionResult::TooManyUnconfirmedMessages } // then, dispatch message @@ -207,7 +207,7 @@ impl InboundLane { }; self.storage.set_data(data); - ReceivalResult::Dispatched(dispatch_result) + ReceptionResult::Dispatched(dispatch_result) } } @@ -235,7 +235,7 @@ mod tests { nonce, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::Dispatched(dispatch_result(0)) + ReceptionResult::Dispatched(dispatch_result(0)) ); } @@ -362,7 +362,7 @@ mod tests { 10, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::InvalidNonce + ReceptionResult::InvalidNonce ); assert_eq!(lane.storage.get_or_init_data().last_delivered_nonce(), 0); }); @@ -381,7 +381,7 @@ mod tests { current_nonce, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::Dispatched(dispatch_result(0)) + ReceptionResult::Dispatched(dispatch_result(0)) ); } // Fails to dispatch new message from different than latest relayer. @@ -391,7 +391,7 @@ mod tests { max_nonce + 1, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::TooManyUnrewardedRelayers, + ReceptionResult::TooManyUnrewardedRelayers, ); // Fails to dispatch new messages from latest relayer. Prevents griefing attacks. assert_eq!( @@ -400,7 +400,7 @@ mod tests { max_nonce + 1, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::TooManyUnrewardedRelayers, + ReceptionResult::TooManyUnrewardedRelayers, ); }); } @@ -417,7 +417,7 @@ mod tests { current_nonce, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::Dispatched(dispatch_result(0)) + ReceptionResult::Dispatched(dispatch_result(0)) ); } // Fails to dispatch new message from different than latest relayer. 
@@ -427,7 +427,7 @@ mod tests { max_nonce + 1, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::TooManyUnconfirmedMessages, + ReceptionResult::TooManyUnconfirmedMessages, ); // Fails to dispatch new messages from latest relayer. assert_eq!( @@ -436,7 +436,7 @@ mod tests { max_nonce + 1, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::TooManyUnconfirmedMessages, + ReceptionResult::TooManyUnconfirmedMessages, ); }); } @@ -451,7 +451,7 @@ mod tests { 1, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::Dispatched(dispatch_result(0)) + ReceptionResult::Dispatched(dispatch_result(0)) ); assert_eq!( lane.receive_message::( @@ -459,7 +459,7 @@ mod tests { 2, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::Dispatched(dispatch_result(0)) + ReceptionResult::Dispatched(dispatch_result(0)) ); assert_eq!( lane.receive_message::( @@ -467,7 +467,7 @@ mod tests { 3, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::Dispatched(dispatch_result(0)) + ReceptionResult::Dispatched(dispatch_result(0)) ); assert_eq!( lane.storage.get_or_init_data().relayers, @@ -490,7 +490,7 @@ mod tests { 1, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::Dispatched(dispatch_result(0)) + ReceptionResult::Dispatched(dispatch_result(0)) ); assert_eq!( lane.receive_message::( @@ -498,7 +498,7 @@ mod tests { 1, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::InvalidNonce, + ReceptionResult::InvalidNonce, ); }); } @@ -524,7 +524,7 @@ mod tests { 1, inbound_message_data(payload) ), - ReceivalResult::Dispatched(dispatch_result(1)) + ReceptionResult::Dispatched(dispatch_result(1)) ); }); } diff --git a/bridges/modules/messages/src/lib.rs b/bridges/modules/messages/src/lib.rs index a86cb326cf0..bc00db9eba5 100644 --- a/bridges/modules/messages/src/lib.rs +++ b/bridges/modules/messages/src/lib.rs @@ -47,7 +47,7 @@ pub use weights_ext::{ use crate::{ inbound_lane::{InboundLane, InboundLaneStorage}, - outbound_lane::{OutboundLane, OutboundLaneStorage, ReceivalConfirmationError}, + outbound_lane::{OutboundLane, OutboundLaneStorage, ReceptionConfirmationError}, }; use bp_messages::{ @@ -90,7 +90,7 @@ pub const LOG_TARGET: &str = "runtime::bridge-messages"; #[frame_support::pallet] pub mod pallet { use super::*; - use bp_messages::{ReceivalResult, ReceivedMessages}; + use bp_messages::{ReceivedMessages, ReceptionResult}; use bp_runtime::RangeInclusiveExt; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; @@ -376,13 +376,13 @@ pub mod pallet { // delivery transaction cost anyway. And base cost covers everything except // dispatch, so we have a balance here. 
let unspent_weight = match &receival_result { - ReceivalResult::Dispatched(dispatch_result) => { + ReceptionResult::Dispatched(dispatch_result) => { valid_messages += 1; dispatch_result.unspent_weight }, - ReceivalResult::InvalidNonce | - ReceivalResult::TooManyUnrewardedRelayers | - ReceivalResult::TooManyUnconfirmedMessages => message_dispatch_weight, + ReceptionResult::InvalidNonce | + ReceptionResult::TooManyUnrewardedRelayers | + ReceptionResult::TooManyUnconfirmedMessages => message_dispatch_weight, }; lane_messages_received_status.push(message.key.nonce, receival_result); @@ -455,7 +455,7 @@ pub mod pallet { last_delivered_nonce, &lane_data.relayers, ) - .map_err(Error::::ReceivalConfirmation)?; + .map_err(Error::::ReceptionConfirmation)?; if let Some(confirmed_messages) = confirmed_messages { // emit 'delivered' event @@ -563,7 +563,7 @@ pub mod pallet { /// The message someone is trying to work with (i.e. increase fee) is not yet sent. MessageIsNotYetSent, /// Error confirming messages receival. - ReceivalConfirmation(ReceivalConfirmationError), + ReceptionConfirmation(ReceptionConfirmationError), /// Error generated by the `OwnedBridgeModule` trait. BridgeModule(bp_runtime::OwnedBridgeModuleError), } @@ -923,7 +923,7 @@ mod tests { PAYLOAD_REJECTED_BY_TARGET_CHAIN, REGULAR_PAYLOAD, TEST_LANE_ID, TEST_LANE_ID_2, TEST_LANE_ID_3, TEST_RELAYER_A, TEST_RELAYER_B, }, - outbound_lane::ReceivalConfirmationError, + outbound_lane::ReceptionConfirmationError, }; use bp_messages::{ source_chain::MessagesBridge, BridgeMessagesCall, UnrewardedRelayer, @@ -950,11 +950,11 @@ mod tests { let outbound_lane = outbound_lane::(lane_id); let message_nonce = outbound_lane.data().latest_generated_nonce + 1; - let prev_enqueud_messages = outbound_lane.data().queued_messages().saturating_len(); + let prev_enqueued_messages = outbound_lane.data().queued_messages().saturating_len(); let valid_message = Pallet::::validate_message(lane_id, ®ULAR_PAYLOAD) .expect("validate_message has failed"); let artifacts = Pallet::::send_message(valid_message); - assert_eq!(artifacts.enqueued_messages, prev_enqueud_messages + 1); + assert_eq!(artifacts.enqueued_messages, prev_enqueued_messages + 1); // check event with assigned nonce assert_eq!( @@ -1541,7 +1541,7 @@ mod tests { } #[test] - fn actual_dispatch_weight_does_not_overlow() { + fn actual_dispatch_weight_does_not_overflow() { run_test(|| { let message1 = message(1, message_payload(0, u64::MAX / 2)); let message2 = message(2, message_payload(0, u64::MAX / 2)); @@ -1775,7 +1775,7 @@ mod tests { // returns `last_confirmed_nonce`; // 3) it means that we're going to confirm delivery of messages 1..=1; // 4) so the number of declared messages (see `UnrewardedRelayersState`) is `0` and - // numer of actually confirmed messages is `1`. + // number of actually confirmed messages is `1`. 
assert_noop!( Pallet::::receive_messages_delivery_proof( RuntimeOrigin::signed(1), @@ -1785,8 +1785,8 @@ mod tests { ))), UnrewardedRelayersState { last_delivered_nonce: 1, ..Default::default() }, ), - Error::::ReceivalConfirmation( - ReceivalConfirmationError::TryingToConfirmMoreMessagesThanExpected + Error::::ReceptionConfirmation( + ReceptionConfirmationError::TryingToConfirmMoreMessagesThanExpected ), ); }); diff --git a/bridges/modules/messages/src/outbound_lane.rs b/bridges/modules/messages/src/outbound_lane.rs index 431c2cfb7ee..acef5546d2a 100644 --- a/bridges/modules/messages/src/outbound_lane.rs +++ b/bridges/modules/messages/src/outbound_lane.rs @@ -53,7 +53,7 @@ pub type StoredMessagePayload = BoundedVec>::MaximalOu /// Result of messages receival confirmation. #[derive(Encode, Decode, RuntimeDebug, PartialEq, Eq, PalletError, TypeInfo)] -pub enum ReceivalConfirmationError { +pub enum ReceptionConfirmationError { /// Bridged chain is trying to confirm more messages than we have generated. May be a result /// of invalid bridged chain storage. FailedToConfirmFutureMessages, @@ -103,7 +103,7 @@ impl OutboundLane { max_allowed_messages: MessageNonce, latest_delivered_nonce: MessageNonce, relayers: &VecDeque>, - ) -> Result, ReceivalConfirmationError> { + ) -> Result, ReceptionConfirmationError> { let mut data = self.storage.data(); let confirmed_messages = DeliveredMessages { begin: data.latest_received_nonce.saturating_add(1), @@ -113,7 +113,7 @@ impl OutboundLane { return Ok(None) } if confirmed_messages.end > data.latest_generated_nonce { - return Err(ReceivalConfirmationError::FailedToConfirmFutureMessages) + return Err(ReceptionConfirmationError::FailedToConfirmFutureMessages) } if confirmed_messages.total_messages() > max_allowed_messages { // that the relayer has declared correct number of messages that the proof contains (it @@ -127,7 +127,7 @@ impl OutboundLane { confirmed_messages.total_messages(), max_allowed_messages, ); - return Err(ReceivalConfirmationError::TryingToConfirmMoreMessagesThanExpected) + return Err(ReceptionConfirmationError::TryingToConfirmMoreMessagesThanExpected) } ensure_unrewarded_relayers_are_correct(confirmed_messages.end, relayers)?; @@ -176,24 +176,24 @@ impl OutboundLane { fn ensure_unrewarded_relayers_are_correct( latest_received_nonce: MessageNonce, relayers: &VecDeque>, -) -> Result<(), ReceivalConfirmationError> { +) -> Result<(), ReceptionConfirmationError> { let mut expected_entry_begin = relayers.front().map(|entry| entry.messages.begin); for entry in relayers { // unrewarded relayer entry must have at least 1 unconfirmed message // (guaranteed by the `InboundLane::receive_message()`) if entry.messages.end < entry.messages.begin { - return Err(ReceivalConfirmationError::EmptyUnrewardedRelayerEntry) + return Err(ReceptionConfirmationError::EmptyUnrewardedRelayerEntry) } // every entry must confirm range of messages that follows previous entry range // (guaranteed by the `InboundLane::receive_message()`) if expected_entry_begin != Some(entry.messages.begin) { - return Err(ReceivalConfirmationError::NonConsecutiveUnrewardedRelayerEntries) + return Err(ReceptionConfirmationError::NonConsecutiveUnrewardedRelayerEntries) } expected_entry_begin = entry.messages.end.checked_add(1); // entry can't confirm messages larger than `inbound_lane_data.latest_received_nonce()` // (guaranteed by the `InboundLane::receive_message()`) if entry.messages.end > latest_received_nonce { - return Err(ReceivalConfirmationError::FailedToConfirmFutureMessages) + 
return Err(ReceptionConfirmationError::FailedToConfirmFutureMessages)
}
}
@@ -228,7 +228,7 @@ mod tests {
fn assert_3_messages_confirmation_fails(
latest_received_nonce: MessageNonce,
relayers: &VecDeque>,
- ) -> Result, ReceivalConfirmationError> {
+ ) -> Result, ReceptionConfirmationError> {
run_test(|| {
let mut lane = outbound_lane::(TEST_LANE_ID);
lane.send_message(outbound_message_data(REGULAR_PAYLOAD));
@@ -299,7 +299,7 @@
fn confirm_delivery_rejects_nonce_larger_than_last_generated() {
assert_eq!(
assert_3_messages_confirmation_fails(10, &unrewarded_relayers(1..=10),),
- Err(ReceivalConfirmationError::FailedToConfirmFutureMessages),
+ Err(ReceptionConfirmationError::FailedToConfirmFutureMessages),
);
}

@@ -314,7 +314,7 @@
.chain(unrewarded_relayers(3..=3).into_iter())
.collect(),
),
- Err(ReceivalConfirmationError::FailedToConfirmFutureMessages),
+ Err(ReceptionConfirmationError::FailedToConfirmFutureMessages),
);
}

@@ -330,7 +330,7 @@
.chain(unrewarded_relayers(2..=3).into_iter())
.collect(),
),
- Err(ReceivalConfirmationError::EmptyUnrewardedRelayerEntry),
+ Err(ReceptionConfirmationError::EmptyUnrewardedRelayerEntry),
);
}

@@ -345,7 +345,7 @@
.chain(unrewarded_relayers(2..=2).into_iter())
.collect(),
),
- Err(ReceivalConfirmationError::NonConsecutiveUnrewardedRelayerEntries),
+ Err(ReceptionConfirmationError::NonConsecutiveUnrewardedRelayerEntries),
);
}

@@ -409,11 +409,11 @@
lane.send_message(outbound_message_data(REGULAR_PAYLOAD));
assert_eq!(
lane.confirm_delivery(0, 3, &unrewarded_relayers(1..=3)),
- Err(ReceivalConfirmationError::TryingToConfirmMoreMessagesThanExpected),
+ Err(ReceptionConfirmationError::TryingToConfirmMoreMessagesThanExpected),
);
assert_eq!(
lane.confirm_delivery(2, 3, &unrewarded_relayers(1..=3)),
- Err(ReceivalConfirmationError::TryingToConfirmMoreMessagesThanExpected),
+ Err(ReceptionConfirmationError::TryingToConfirmMoreMessagesThanExpected),
);
assert_eq!(
lane.confirm_delivery(3, 3, &unrewarded_relayers(1..=3)),
diff --git a/bridges/modules/parachains/src/mock.rs b/bridges/modules/parachains/src/mock.rs
index 3af3fd3e763..d9cbabf850e 100644
--- a/bridges/modules/parachains/src/mock.rs
+++ b/bridges/modules/parachains/src/mock.rs
@@ -261,7 +261,7 @@ impl Chain for TestBridgedChain {
impl ChainWithGrandpa for TestBridgedChain {
const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = "";
const MAX_AUTHORITIES_COUNT: u32 = 16;
- const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 8;
+ const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = 8;
const MAX_MANDATORY_HEADER_SIZE: u32 = 256;
const AVERAGE_HEADER_SIZE: u32 = 64;
}
@@ -294,7 +294,7 @@ impl Chain for OtherBridgedChain {
impl ChainWithGrandpa for OtherBridgedChain {
const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = "";
const MAX_AUTHORITIES_COUNT: u32 = 16;
- const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 8;
+ const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = 8;
const MAX_MANDATORY_HEADER_SIZE: u32 = 256;
const AVERAGE_HEADER_SIZE: u32 = 64;
}
diff --git a/bridges/modules/xcm-bridge-hub-router/src/lib.rs b/bridges/modules/xcm-bridge-hub-router/src/lib.rs
index f219be78f9e..5d0be41b1b5 100644
--- a/bridges/modules/xcm-bridge-hub-router/src/lib.rs
+++ b/bridges/modules/xcm-bridge-hub-router/src/lib.rs
@@ -427,7 +427,7 @@ mod tests {
run_test(|| {
Bridge::::put(uncongested_bridge(FixedU128::from_rational(125, 100)));
- // it shold eventually decreased to one
+ // it should eventually decrease to one
while
XcmBridgeHubRouter::bridge().delivery_fee_factor > MINIMAL_DELIVERY_FEE_FACTOR { XcmBridgeHubRouter::on_initialize(One::one()); } diff --git a/bridges/modules/xcm-bridge-hub/Cargo.toml b/bridges/modules/xcm-bridge-hub/Cargo.toml index d910319d9bf..68ac32281f3 100644 --- a/bridges/modules/xcm-bridge-hub/Cargo.toml +++ b/bridges/modules/xcm-bridge-hub/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-xcm-bridge-hub" -description = "Module that adds dynamic bridges/lanes support to XCM infrastucture at the bridge hub." +description = "Module that adds dynamic bridges/lanes support to XCM infrastructure at the bridge hub." version = "0.2.0" authors.workspace = true edition.workspace = true diff --git a/bridges/primitives/header-chain/src/justification/mod.rs b/bridges/primitives/header-chain/src/justification/mod.rs index b32d8bdb5f1..d7c2cbf429e 100644 --- a/bridges/primitives/header-chain/src/justification/mod.rs +++ b/bridges/primitives/header-chain/src/justification/mod.rs @@ -83,7 +83,7 @@ impl GrandpaJustification { .saturating_add(HashOf::::max_encoded_len().saturated_into()); let max_expected_votes_ancestries_size = - C::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY.saturating_mul(C::AVERAGE_HEADER_SIZE); + C::REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY.saturating_mul(C::AVERAGE_HEADER_SIZE); // justification is round number (u64=8b), a signed GRANDPA commit and the // `votes_ancestries` vector diff --git a/bridges/primitives/header-chain/src/justification/verification/mod.rs b/bridges/primitives/header-chain/src/justification/verification/mod.rs index c71149bf9c2..9df3511e103 100644 --- a/bridges/primitives/header-chain/src/justification/verification/mod.rs +++ b/bridges/primitives/header-chain/src/justification/verification/mod.rs @@ -318,7 +318,7 @@ trait JustificationVerifier { } // check that the cumulative weight of validators that voted for the justification target - // (or one of its descendents) is larger than the required threshold. + // (or one of its descendants) is larger than the required threshold. if cumulative_weight < threshold { return Err(Error::TooLowCumulativeWeight) } diff --git a/bridges/primitives/header-chain/src/lib.rs b/bridges/primitives/header-chain/src/lib.rs index 84a6a881a83..98fb9ff83d8 100644 --- a/bridges/primitives/header-chain/src/lib.rs +++ b/bridges/primitives/header-chain/src/lib.rs @@ -283,7 +283,7 @@ pub trait ChainWithGrandpa: Chain { /// ancestry and the pallet will accept such justification. The limit is only used to compute /// maximal refund amount and submitting justifications which exceed the limit, may be costly /// to submitter. - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32; /// Maximal size of the mandatory chain header. Mandatory header is the header that enacts new /// GRANDPA authorities set (so it has large digest inside). 
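The constant renamed throughout these hunks feeds directly into the worst-case justification size estimate: each header that may reasonably appear in the votes ancestry contributes roughly one average-sized header. A minimal sketch of that bound, using the `TestBridgedChain` mock values from the hunks above (real chains define their own constants):

```rust
// Standalone illustration of the `max_expected_votes_ancestries_size`
// computation shown later in this patch; the values are the mock-chain
// constants (8 ancestry headers of 64 bytes each).
const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = 8;
const AVERAGE_HEADER_SIZE: u32 = 64;

fn max_expected_votes_ancestries_size() -> u32 {
    // Worst case: every expected ancestry entry is an average-sized header.
    REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY.saturating_mul(AVERAGE_HEADER_SIZE)
}

fn main() {
    assert_eq!(max_expected_votes_ancestries_size(), 512);
}
```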
@@ -317,8 +317,8 @@ where const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = ::WITH_CHAIN_GRANDPA_PALLET_NAME; const MAX_AUTHORITIES_COUNT: u32 = ::MAX_AUTHORITIES_COUNT; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = - ::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = + ::REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY; const MAX_MANDATORY_HEADER_SIZE: u32 = ::MAX_MANDATORY_HEADER_SIZE; const AVERAGE_HEADER_SIZE: u32 = ::AVERAGE_HEADER_SIZE; @@ -373,7 +373,7 @@ mod tests { impl ChainWithGrandpa for TestChain { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = "Test"; const MAX_AUTHORITIES_COUNT: u32 = 128; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 2; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = 2; const MAX_MANDATORY_HEADER_SIZE: u32 = 100_000; const AVERAGE_HEADER_SIZE: u32 = 1_024; } diff --git a/bridges/primitives/messages/src/lib.rs b/bridges/primitives/messages/src/lib.rs index 51b3f25f715..c3f79b3ee38 100644 --- a/bridges/primitives/messages/src/lib.rs +++ b/bridges/primitives/messages/src/lib.rs @@ -289,27 +289,27 @@ pub struct ReceivedMessages { /// Id of the lane which is receiving messages. pub lane: LaneId, /// Result of messages which we tried to dispatch - pub receive_results: Vec<(MessageNonce, ReceivalResult)>, + pub receive_results: Vec<(MessageNonce, ReceptionResult)>, } impl ReceivedMessages { /// Creates new `ReceivedMessages` structure from given results. pub fn new( lane: LaneId, - receive_results: Vec<(MessageNonce, ReceivalResult)>, + receive_results: Vec<(MessageNonce, ReceptionResult)>, ) -> Self { ReceivedMessages { lane, receive_results } } /// Push `result` of the `message` delivery onto `receive_results` vector. - pub fn push(&mut self, message: MessageNonce, result: ReceivalResult) { + pub fn push(&mut self, message: MessageNonce, result: ReceptionResult) { self.receive_results.push((message, result)); } } /// Result of single message receival. #[derive(RuntimeDebug, Encode, Decode, PartialEq, Eq, Clone, TypeInfo)] -pub enum ReceivalResult { +pub enum ReceptionResult { /// Message has been received and dispatched. Note that we don't care whether dispatch has /// been successful or not - in both case message falls into this category. /// diff --git a/bridges/primitives/polkadot-core/src/lib.rs b/bridges/primitives/polkadot-core/src/lib.rs index df2836495bb..e83be59b238 100644 --- a/bridges/primitives/polkadot-core/src/lib.rs +++ b/bridges/primitives/polkadot-core/src/lib.rs @@ -71,7 +71,7 @@ pub const MAX_AUTHORITIES_COUNT: u32 = 1_256; /// justifications with any additional headers in votes ancestry, so reasonable headers may /// be set to zero. But we assume that there may be small GRANDPA lags, so we're leaving some /// reserve here. -pub const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 2; +pub const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = 2; /// Average header size in `votes_ancestries` field of justification on Polkadot-like /// chains. diff --git a/bridges/primitives/runtime/src/chain.rs b/bridges/primitives/runtime/src/chain.rs index 9ba21a1cddf..4ec5a001a99 100644 --- a/bridges/primitives/runtime/src/chain.rs +++ b/bridges/primitives/runtime/src/chain.rs @@ -104,7 +104,7 @@ pub trait Chain: Send + Sync + 'static { const ID: ChainId; /// A type that fulfills the abstract idea of what a Substrate block number is. 
- // Constraits come from the associated Number type of `sp_runtime::traits::Header` + // Constraints come from the associated Number type of `sp_runtime::traits::Header` // See here for more info: // https://crates.parity.io/sp_runtime/traits/trait.Header.html#associatedtype.Number // @@ -125,7 +125,7 @@ pub trait Chain: Send + Sync + 'static { + MaxEncodedLen; /// A type that fulfills the abstract idea of what a Substrate hash is. - // Constraits come from the associated Hash type of `sp_runtime::traits::Header` + // Constraints come from the associated Hash type of `sp_runtime::traits::Header` // See here for more info: // https://crates.parity.io/sp_runtime/traits/trait.Header.html#associatedtype.Hash type Hash: Parameter @@ -143,7 +143,7 @@ pub trait Chain: Send + Sync + 'static { /// A type that fulfills the abstract idea of what a Substrate hasher (a type /// that produces hashes) is. - // Constraits come from the associated Hashing type of `sp_runtime::traits::Header` + // Constraints come from the associated Hashing type of `sp_runtime::traits::Header` // See here for more info: // https://crates.parity.io/sp_runtime/traits/trait.Header.html#associatedtype.Hashing type Hasher: HashT; diff --git a/bridges/primitives/runtime/src/lib.rs b/bridges/primitives/runtime/src/lib.rs index 850318923dc..c9c5c941291 100644 --- a/bridges/primitives/runtime/src/lib.rs +++ b/bridges/primitives/runtime/src/lib.rs @@ -56,7 +56,7 @@ mod chain; mod storage_proof; mod storage_types; -// Re-export macro to aviod include paste dependency everywhere +// Re-export macro to avoid include paste dependency everywhere pub use sp_runtime::paste; /// Use this when something must be shared among all instances. @@ -461,7 +461,7 @@ macro_rules! generate_static_str_provider { }; } -/// Error message that is only dispayable in `std` environment. +/// Error message that is only displayable in `std` environment. #[derive(Encode, Decode, Clone, Eq, PartialEq, PalletError, TypeInfo)] #[scale_info(skip_type_params(T))] pub struct StrippableError { diff --git a/bridges/primitives/test-utils/src/lib.rs b/bridges/primitives/test-utils/src/lib.rs index 1d80890779b..f4fe4a242e7 100644 --- a/bridges/primitives/test-utils/src/lib.rs +++ b/bridges/primitives/test-utils/src/lib.rs @@ -88,7 +88,7 @@ pub fn make_default_justification(header: &H) -> GrandpaJustificatio /// Generate justifications in a way where we are able to tune the number of pre-commits /// and vote ancestries which are included in the justification. /// -/// This is useful for benchmarkings where we want to generate valid justifications with +/// This is useful for benchmarks where we want to generate valid justifications with /// a specific number of pre-commits (tuned with the number of "authorities") and/or a specific /// number of vote ancestries (tuned with the "votes" parameter). /// diff --git a/bridges/scripts/verify-pallets-build.sh b/bridges/scripts/verify-pallets-build.sh index 4eefaa8efa0..9c57a2a3c47 100755 --- a/bridges/scripts/verify-pallets-build.sh +++ b/bridges/scripts/verify-pallets-build.sh @@ -134,7 +134,7 @@ cargo check -p bridge-runtime-common cargo check -p bridge-runtime-common --features runtime-benchmarks cargo check -p bridge-runtime-common --features integrity-test -# we're removing lock file after all chechs are done. Otherwise we may use different +# we're removing lock file after all checks are done. 
Otherwise we may use different # Substrate/Polkadot/Cumulus commits and our checks will fail rm -f $BRIDGES_FOLDER/Cargo.lock diff --git a/bridges/snowbridge/pallets/ethereum-client/src/lib.rs b/bridges/snowbridge/pallets/ethereum-client/src/lib.rs index fc2ab2fbb58..f57f5199020 100644 --- a/bridges/snowbridge/pallets/ethereum-client/src/lib.rs +++ b/bridges/snowbridge/pallets/ethereum-client/src/lib.rs @@ -10,7 +10,7 @@ //! //! * [`Call::force_checkpoint`]: Set the initial trusted consensus checkpoint. //! * [`Call::set_operating_mode`]: Set the operating mode of the pallet. Can be used to disable -//! processing of conensus updates. +//! processing of consensus updates. //! //! ## Consensus Updates //! diff --git a/bridges/snowbridge/pallets/ethereum-client/src/tests.rs b/bridges/snowbridge/pallets/ethereum-client/src/tests.rs index 4a7b7b45886..20a184490fd 100644 --- a/bridges/snowbridge/pallets/ethereum-client/src/tests.rs +++ b/bridges/snowbridge/pallets/ethereum-client/src/tests.rs @@ -249,7 +249,7 @@ pub fn execution_header_pruning() { stored_hashes.push(hash); } - // We should have only stored upto `execution_header_prune_threshold` + // We should have only stored up to `execution_header_prune_threshold` assert_eq!( ExecutionHeaders::::iter().count() as u32, execution_header_prune_threshold diff --git a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/register_token_with_insufficient_fee.rs b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/register_token_with_insufficient_fee.rs index 82ff2283101..dfda0b2b427 100644 --- a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/register_token_with_insufficient_fee.rs +++ b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/register_token_with_insufficient_fee.rs @@ -9,7 +9,7 @@ use snowbridge_beacon_primitives::CompactExecutionHeader; use snowbridge_core::inbound::{Log, Message, Proof}; use sp_std::vec; -pub fn make_register_token_with_infufficient_fee_message() -> InboundQueueFixture { +pub fn make_register_token_with_insufficient_fee_message() -> InboundQueueFixture { InboundQueueFixture { execution_header: CompactExecutionHeader{ parent_hash: hex!("998e81dc6df788a920b67e058fbde0dc3f4ec6f11f3f7cd8c3148e6d99584885").into(), diff --git a/bridges/testing/environments/rococo-westend/rococo.zndsl b/bridges/testing/environments/rococo-westend/rococo.zndsl index 5b49c7c632f..a75286445a2 100644 --- a/bridges/testing/environments/rococo-westend/rococo.zndsl +++ b/bridges/testing/environments/rococo-westend/rococo.zndsl @@ -1,7 +1,7 @@ -Description: Check if the with-Westend GRANPDA pallet was initialized at Rococo BH +Description: Check if the with-Westend GRANDPA pallet was initialized at Rococo BH Network: ./bridge_hub_rococo_local_network.toml Creds: config -# relay is already started - let's wait until with-Westend GRANPDA pallet is initialized at Rococo +# relay is already started - let's wait until with-Westend GRANDPA pallet is initialized at Rococo bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/best-finalized-header-at-bridged-chain.js with "Westend,0" within 400 seconds diff --git a/bridges/testing/environments/rococo-westend/westend.zndsl b/bridges/testing/environments/rococo-westend/westend.zndsl index 07968838852..21d4ebf3b05 100644 --- a/bridges/testing/environments/rococo-westend/westend.zndsl +++ b/bridges/testing/environments/rococo-westend/westend.zndsl @@ -1,6 +1,6 @@ -Description: Check if the with-Rococo GRANPDA pallet was initialized at Westend BH +Description: Check if the with-Rococo 
GRANPDA pallet was initialized at Westend BH
+Description: Check if the with-Rococo GRANDPA pallet was initialized at Westend BH
Network: ./bridge_hub_westend_local_network.toml
Creds: config

-# relay is already started - let's wait until with-Rococo GRANPDA pallet is initialized at Westend
+# relay is already started - let's wait until with-Rococo GRANDPA pallet is initialized at Westend
bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/best-finalized-header-at-bridged-chain.js with "Rococo,0" within 400 seconds
diff --git a/bridges/testing/framework/utils/generate_hex_encoded_call/index.js b/bridges/testing/framework/utils/generate_hex_encoded_call/index.js
index 30f89d754ce..c8e361b25a9 100644
--- a/bridges/testing/framework/utils/generate_hex_encoded_call/index.js
+++ b/bridges/testing/framework/utils/generate_hex_encoded_call/index.js
@@ -126,36 +126,36 @@ if (!process.argv[2] || !process.argv[3]) {
}

const type = process.argv[2];
-const rpcEnpoint = process.argv[3];
+const rpcEndpoint = process.argv[3];
const output = process.argv[4];
const inputArgs = process.argv.slice(5, process.argv.length);

console.log(`Generating hex-encoded call data for:`);
console.log(` type: ${type}`);
-console.log(` rpcEnpoint: ${rpcEnpoint}`);
+console.log(` rpcEndpoint: ${rpcEndpoint}`);
console.log(` output: ${output}`);
console.log(` inputArgs: ${inputArgs}`);

switch (type) {
case 'remark-with-event':
- remarkWithEvent(rpcEnpoint, output);
+ remarkWithEvent(rpcEndpoint, output);
break;
case 'add-exporter-config':
- addExporterConfig(rpcEnpoint, output, inputArgs[0], inputArgs[1]);
+ addExporterConfig(rpcEndpoint, output, inputArgs[0], inputArgs[1]);
break;
case 'remove-exporter-config':
- removeExporterConfig(rpcEnpoint, output, inputArgs[0], inputArgs[1]);
+ removeExporterConfig(rpcEndpoint, output, inputArgs[0], inputArgs[1]);
break;
case 'add-universal-alias':
- addUniversalAlias(rpcEnpoint, output, inputArgs[0], inputArgs[1]);
+ addUniversalAlias(rpcEndpoint, output, inputArgs[0], inputArgs[1]);
break;
case 'add-reserve-location':
- addReserveLocation(rpcEnpoint, output, inputArgs[0]);
+ addReserveLocation(rpcEndpoint, output, inputArgs[0]);
break;
case 'force-create-asset':
- forceCreateAsset(rpcEnpoint, output, inputArgs[0], inputArgs[1], inputArgs[2], inputArgs[3]);
+ forceCreateAsset(rpcEndpoint, output, inputArgs[0], inputArgs[1], inputArgs[2], inputArgs[3]);
break;
case 'force-xcm-version':
- forceXcmVersion(rpcEnpoint, output, inputArgs[0], inputArgs[1]);
+ forceXcmVersion(rpcEndpoint, output, inputArgs[0], inputArgs[1]);
break;
case 'check':
console.log(`Checking nodejs installation, if you see this everything is ready!`);
diff --git a/bridges/testing/run-tests.sh b/bridges/testing/run-tests.sh
index 6149d991265..fd12b57f533 100755
--- a/bridges/testing/run-tests.sh
+++ b/bridges/testing/run-tests.sh
@@ -30,7 +30,7 @@ done
export POLKADOT_SDK_PATH=`realpath $(dirname "$0")/../..`
export BRIDGE_TESTS_FOLDER=$POLKADOT_SDK_PATH/bridges/testing/tests

-# set pathc to binaries
+# set path to binaries
if [ "$ZOMBIENET_DOCKER_PATHS" -eq 1 ]; then
export POLKADOT_BINARY=/usr/local/bin/polkadot
export POLKADOT_PARACHAIN_BINARY=/usr/local/bin/polkadot-parachain
diff --git a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh b/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh
index 7d5b8d92736..3a604b3876d 100755
--- a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh
+++ b/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh
@@ -24,7 +24,7 @@ echo -e "Sleeping 90s before starting relayer
...\n" sleep 90 ${BASH_SOURCE%/*}/../../environments/rococo-westend/start_relayer.sh $rococo_dir $westend_dir relayer_pid -# Sometimes the relayer syncs multiple parachain heads in the begining leading to test failures. +# Sometimes the relayer syncs multiple parachain heads in the beginning leading to test failures. # See issue: https://github.com/paritytech/parity-bridges-common/issues/2838. # TODO: Remove this sleep after the issue is fixed. echo -e "Sleeping 180s before runing the tests ...\n" diff --git a/cumulus/client/consensus/common/src/level_monitor.rs b/cumulus/client/consensus/common/src/level_monitor.rs index 270e3f57ae5..fb4b0498f68 100644 --- a/cumulus/client/consensus/common/src/level_monitor.rs +++ b/cumulus/client/consensus/common/src/level_monitor.rs @@ -158,7 +158,7 @@ where /// the limit passed to the constructor. /// /// If the given level is found to have a number of blocks greater than or equal the limit - /// then the limit is enforced by chosing one (or more) blocks to remove. + /// then the limit is enforced by choosing one (or more) blocks to remove. /// /// The removal strategy is driven by the block freshness. /// diff --git a/cumulus/client/consensus/common/src/tests.rs b/cumulus/client/consensus/common/src/tests.rs index bfb95ae388a..7816d3a4c40 100644 --- a/cumulus/client/consensus/common/src/tests.rs +++ b/cumulus/client/consensus/common/src/tests.rs @@ -832,7 +832,7 @@ fn restore_limit_monitor() { .collect::>(); // Scenario before limit application (with B11 imported as best) - // Import order (freshess): B00, B10, B11, B12, B20, B21 + // Import order (freshness): B00, B10, B11, B12, B20, B21 // // B00 --+-- B10 --+-- B20 // | +-- B21 diff --git a/cumulus/client/pov-recovery/src/lib.rs b/cumulus/client/pov-recovery/src/lib.rs index 32aba6c8993..0ca21749c3e 100644 --- a/cumulus/client/pov-recovery/src/lib.rs +++ b/cumulus/client/pov-recovery/src/lib.rs @@ -18,7 +18,7 @@ //! //! A parachain needs to build PoVs that are send to the relay chain to progress. These PoVs are //! erasure encoded and one piece of it is stored by each relay chain validator. As the relay chain -//! decides on which PoV per parachain to include and thus, to progess the parachain it can happen +//! decides on which PoV per parachain to include and thus, to progress the parachain it can happen //! that the block corresponding to this PoV isn't propagated in the parachain network. This can //! have several reasons, either a malicious collator that managed to include its own PoV and //! doesn't want to share it with the rest of the network or maybe a collator went down before it @@ -338,8 +338,8 @@ where let mut blocks_to_delete = vec![hash]; while let Some(delete) = blocks_to_delete.pop() { - if let Some(childs) = self.waiting_for_parent.remove(&delete) { - blocks_to_delete.extend(childs.iter().map(BlockT::hash)); + if let Some(children) = self.waiting_for_parent.remove(&delete) { + blocks_to_delete.extend(children.iter().map(BlockT::hash)); } } self.clear_waiting_recovery(&hash); @@ -448,7 +448,7 @@ where /// Import the given `block`. /// - /// This will also recursivley drain `waiting_for_parent` and import them as well. + /// This will also recursively drain `waiting_for_parent` and import them as well. fn import_block(&mut self, block: Block) { let mut blocks = VecDeque::new(); @@ -495,7 +495,7 @@ where tracing::debug!( target: LOG_TARGET, block_hash = ?hash, - "Cound not recover. Block was never announced as candidate" + "Could not recover. 
Block was never announced as candidate" ); return }, diff --git a/cumulus/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs b/cumulus/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs index b716feef1c9..48d35dd3a55 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs @@ -293,7 +293,8 @@ impl ReconnectingWebsocketWorker { /// listeners. If an error occurs during sending, the receiver has been closed and we remove /// the sender from the list. /// - Find a new valid RPC server to connect to in case the websocket connection is terminated. - /// If the worker is not able to connec to an RPC server from the list, the worker shuts down. + /// If the worker is not able to connect to an RPC server from the list, the worker shuts + /// down. pub async fn run(mut self) { let mut pending_requests = FuturesUnordered::new(); diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 1c01ef33c7e..54a1def5960 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -464,7 +464,7 @@ pub mod pallet { // One complication here, is that the `host_configuration` is updated by an inherent // and those are processed after the block initialization phase. Therefore, we have to // be content only with the configuration as per the previous block. That means that - // the configuration can be either stale (or be abscent altogether in case of the + // the configuration can be either stale (or be absent altogether in case of the // beginning of the chain). // // In order to mitigate this, we do the following. At the time, we are only concerned diff --git a/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs b/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs index 5519d1521ea..60eccfb072f 100644 --- a/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs +++ b/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs @@ -90,7 +90,7 @@ pub enum Error { DmqMqcHead(ReadEntryErr), /// Relay dispatch queue cannot be extracted. RelayDispatchQueueRemainingCapacity(ReadEntryErr), - /// The hrmp inress channel index cannot be extracted. + /// The hrmp ingress channel index cannot be extracted. HrmpIngressChannelIndex(ReadEntryErr), /// The hrmp egress channel index cannot be extracted. HrmpEgressChannelIndex(ReadEntryErr), diff --git a/cumulus/pallets/xcmp-queue/src/lib.rs b/cumulus/pallets/xcmp-queue/src/lib.rs index e92169be16b..b4cd925d540 100644 --- a/cumulus/pallets/xcmp-queue/src/lib.rs +++ b/cumulus/pallets/xcmp-queue/src/lib.rs @@ -462,7 +462,7 @@ impl Pallet { // Max message size refers to aggregates, or pages. Not to individual fragments. let max_message_size = channel_info.max_message_size as usize; let format_size = format.encoded_size(); - // We check the encoded fragment length plus the format size agains the max message size + // We check the encoded fragment length plus the format size against the max message size // because the format is concatenated if a new page is needed. 
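// Worked example with assumed numbers (illustration only, not values from this
// patch): with `max_message_size = 1_024` and `format_size = 1`, an encoded
// fragment of 1_024 bytes gives `size_to_check = 1_024 + 1 > 1_024`, so the
// fragment does not fit even on a fresh page, because the format marker is
// written again at the start of every new page; a 1_023-byte fragment would fit.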
let size_to_check = encoded_fragment .len() diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs index 2acccb9649b..e5378b35f5e 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs @@ -20,7 +20,7 @@ use sp_core::{sr25519, storage::Storage}; // Cumulus use emulated_integration_tests_common::{ accounts, build_genesis_storage, collators, get_account_id_from_seed, - PenpalSiblingSovereigAccount, PenpalTeleportableAssetLocation, RESERVABLE_ASSET_ID, + PenpalSiblingSovereignAccount, PenpalTeleportableAssetLocation, RESERVABLE_ASSET_ID, SAFE_XCM_VERSION, }; use parachains_common::{AccountId, Balance}; @@ -76,7 +76,7 @@ pub fn genesis() -> Storage { // Penpal's teleportable asset representation ( PenpalTeleportableAssetLocation::get(), - PenpalSiblingSovereigAccount::get(), + PenpalSiblingSovereignAccount::get(), true, ED, ), diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs index e30529aff42..219d1306906 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs @@ -20,7 +20,7 @@ use sp_core::{sr25519, storage::Storage}; // Cumulus use emulated_integration_tests_common::{ accounts, build_genesis_storage, collators, get_account_id_from_seed, - PenpalSiblingSovereigAccount, PenpalTeleportableAssetLocation, RESERVABLE_ASSET_ID, + PenpalSiblingSovereignAccount, PenpalTeleportableAssetLocation, RESERVABLE_ASSET_ID, SAFE_XCM_VERSION, }; use parachains_common::{AccountId, Balance}; @@ -72,7 +72,7 @@ pub fn genesis() -> Storage { // Penpal's teleportable asset representation ( PenpalTeleportableAssetLocation::get(), - PenpalSiblingSovereigAccount::get(), + PenpalSiblingSovereignAccount::get(), true, ED, ), diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs index 48901647fd0..d81ab8143dd 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs @@ -31,8 +31,8 @@ pub const PARA_ID_B: u32 = 2001; pub const ED: Balance = penpal_runtime::EXISTENTIAL_DEPOSIT; parameter_types! 
{ - pub PenpalSudoAcccount: AccountId = get_account_id_from_seed::("Alice"); - pub PenpalAssetOwner: AccountId = PenpalSudoAcccount::get(); + pub PenpalSudoAccount: AccountId = get_account_id_from_seed::("Alice"); + pub PenpalAssetOwner: AccountId = PenpalSudoAccount::get(); } pub fn genesis(para_id: u32) -> Storage { @@ -66,7 +66,7 @@ pub fn genesis(para_id: u32) -> Storage { safe_xcm_version: Some(SAFE_XCM_VERSION), ..Default::default() }, - sudo: penpal_runtime::SudoConfig { key: Some(PenpalSudoAcccount::get()) }, + sudo: penpal_runtime::SudoConfig { key: Some(PenpalSudoAccount::get()) }, assets: penpal_runtime::AssetsConfig { assets: vec![( penpal_runtime::xcm_config::TELEPORTABLE_ASSET_ID, diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs index 651b3a52306..0b49c7a3e09 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs @@ -14,7 +14,7 @@ // limitations under the License. mod genesis; -pub use genesis::{genesis, PenpalAssetOwner, PenpalSudoAcccount, ED, PARA_ID_A, PARA_ID_B}; +pub use genesis::{genesis, PenpalAssetOwner, PenpalSudoAccount, ED, PARA_ID_A, PARA_ID_B}; pub use penpal_runtime::xcm_config::{ CustomizableAssetFromSystemAssetHub, LocalTeleportableToAssetHub, XcmConfig, }; diff --git a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs index 5fc08dff32c..ae69bf991e5 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs @@ -114,7 +114,7 @@ where .expect("Bridge message does not exist") .into(); let payload = Vec::::decode(&mut &encoded_payload[..]) - .expect("Decodign XCM message failed"); + .expect("Decoding XCM message failed"); let id: u32 = LaneIdWrapper(*lane).into(); let message = BridgeMessage { id, nonce, payload }; @@ -265,7 +265,7 @@ macro_rules! impl_assert_events_helpers_for_relay_chain { $crate::impls::assert_expected_events!( Self, vec![ - // XCM is successfully received and proccessed + // XCM is successfully received and processed [<$chain RuntimeEvent>]::::MessageQueue($crate::impls::pallet_message_queue::Event::Processed { origin: $crate::impls::AggregateMessageOrigin::Ump($crate::impls::UmpQueueId::Para(id)), weight_used, @@ -343,7 +343,7 @@ macro_rules! impl_hrmp_channels_helpers_for_relay_chain { ::Runtime, >::contains_key(&channel_id); - // Check the HRMP channel has been successfully registrered + // Check the HRMP channel has been successfully registered assert!(hrmp_channel_exist) }); } diff --git a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs index 40204ca297a..cbde0642f1a 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs @@ -67,7 +67,7 @@ parameter_types! 
{ xcm::v3::Junction::GeneralIndex(TELEPORTABLE_ASSET_ID.into()), ] ); - pub PenpalSiblingSovereigAccount: AccountId = Sibling::from(PENPAL_ID).into_account_truncating(); + pub PenpalSiblingSovereignAccount: AccountId = Sibling::from(PENPAL_ID).into_account_truncating(); } /// Helper function to generate a crypto pair from seed diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs index 0a5956dedfd..a0738839087 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs @@ -1052,7 +1052,7 @@ fn reserve_transfer_native_asset_from_para_to_para_trough_relay() { // fund the Parachain Origin's SA on Relay Chain with the native tokens held in reserve Rococo::fund_accounts(vec![(sov_of_sender_on_relay.into(), amount_to_send * 2)]); - // Init values for Parachain Desitnation + // Init values for Parachain Destination let receiver = PenpalBReceiver::get(); // Init Test diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs index 64ad15ca312..a26dfef8e8e 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs @@ -215,7 +215,7 @@ fn para_to_system_para_assets_sender_assertions(t: ParaToSystemParaTest) { let system_para_native_asset_location = v3::Location::try_from(RelayLocation::get()).expect("conversion works"); let reservable_asset_location = - v3::Location::try_from(PenpalLocalReservableFromAssetHub::get()).expect("coversion works"); + v3::Location::try_from(PenpalLocalReservableFromAssetHub::get()).expect("conversion works"); PenpalA::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(864_610_000, 8799))); assert_expected_events!( PenpalA, @@ -246,7 +246,7 @@ fn para_to_system_para_assets_sender_assertions(t: ParaToSystemParaTest) { fn system_para_to_para_assets_receiver_assertions(t: SystemParaToParaTest) { type RuntimeEvent = ::RuntimeEvent; let system_para_asset_location = - v3::Location::try_from(PenpalLocalReservableFromAssetHub::get()).expect("coversion works"); + v3::Location::try_from(PenpalLocalReservableFromAssetHub::get()).expect("conversion works"); PenpalA::assert_xcmp_queue_success(None); assert_expected_events!( PenpalA, @@ -1054,7 +1054,7 @@ fn reserve_transfer_native_asset_from_para_to_para_trough_relay() { // fund the Parachain Origin's SA on Relay Chain with the native tokens held in reserve Westend::fund_accounts(vec![(sov_of_sender_on_relay.into(), amount_to_send * 2)]); - // Init values for Parachain Desitnation + // Init values for Parachain Destination let receiver = PenpalBReceiver::get(); // Init Test diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs index 1e74d63e1d5..26b82375e07 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs +++ 
b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs @@ -23,7 +23,7 @@ use rococo_westend_system_emulated_network::BridgeHubRococoParaSender as BridgeH use snowbridge_core::outbound::OperatingMode; use snowbridge_pallet_inbound_queue_fixtures::{ register_token::make_register_token_message, - register_token_with_insufficient_fee::make_register_token_with_infufficient_fee_message, + register_token_with_insufficient_fee::make_register_token_with_insufficient_fee_message, send_token::make_send_token_message, send_token_to_penpal::make_send_token_to_penpal_message, InboundQueueFixture, }; @@ -514,7 +514,7 @@ fn register_weth_token_in_asset_hub_fail_for_insufficient_fee() { type RuntimeEvent = ::RuntimeEvent; // Construct RegisterToken message and sent to inbound queue - let message = make_register_token_with_infufficient_fee_message(); + let message = make_register_token_with_insufficient_fee_message(); send_inbound_message(message).unwrap(); assert_expected_events!( diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs index c438361cc17..47c3ed36888 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs @@ -192,7 +192,7 @@ pub type ForeignFungiblesTransactor = FungiblesAdapter< LocationToAccountId, // Our chain's account ID type (we can't get away without mentioning it explicitly): AccountId, - // We dont need to check teleports here. + // We don't need to check teleports here. NoChecking, // The account to use for tracking teleports. CheckingAccount, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs index c993d61545a..7d3ed650e6b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs @@ -185,7 +185,7 @@ pub type ForeignFungiblesTransactor = FungiblesAdapter< LocationToAccountId, // Our chain's account ID type (we can't get away without mentioning it explicitly): AccountId, - // We dont need to check teleports here. + // We don't need to check teleports here. NoChecking, // The account to use for tracking teleports. CheckingAccount, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs index 3ba9b9587d8..6696cb23223 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs @@ -618,7 +618,7 @@ fn test_asset_xcm_take_first_trader_with_refund() { // We actually use half of the weight let weight_used = bought / 2; - // Make sure refurnd works. + // Make sure refund works. let amount_refunded = WeightToFee::weight_to_fee(&(bought - weight_used)); assert_eq!( @@ -745,7 +745,7 @@ fn test_that_buying_ed_refund_does_not_refund_for_take_first_trader() { // Buy weight should work assert_ok!(trader.buy_weight(bought, asset.into(), &ctx)); - // Should return None. We have a specific check making sure we dont go below ED for + // Should return None. 
We have a specific check making sure we don't go below ED for // drop payment assert_eq!(trader.refund_weight(bought, &ctx), None); diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs index 2f2624d8e52..884b71369e7 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs @@ -1120,7 +1120,7 @@ pub fn create_and_manage_foreign_assets_for_local_consensus_parachain_assets_wor AssetId: Clone, AssetIdConverter: MaybeEquivalence, { - // foreign parachain with the same consenus currency as asset + // foreign parachain with the same consensus currency as asset let foreign_asset_id_location = Location::new(1, [Parachain(2222), GeneralIndex(1234567)]); let asset_id = AssetIdConverter::convert(&foreign_asset_id_location).unwrap(); diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs index 1cce3b647cf..0b2364dbb8b 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs @@ -386,7 +386,7 @@ pub fn receive_reserve_asset_deposited_from_different_consensus_works< existential_deposit, ); - // create foreign asset for wrapped/derivated representation + // create foreign asset for wrapped/derived representation assert_ok!( >::force_create( RuntimeHelper::::root_origin(), diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index ad369583f07..cd5f1ad3272 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -456,7 +456,7 @@ mod benches { [cumulus_pallet_xcmp_queue, XcmpQueue] [pallet_collator_selection, CollatorSelection] // XCM - [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] ); @@ -644,7 +644,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -680,7 +680,7 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< xcm_config::XcmConfig, diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index c76611ad2a3..e840a40f5ac 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -344,7 +344,7 @@ parameter_types! 
{ pub const StakingAdminBodyId: BodyId = BodyId::Defense; } -/// We allow Root and the `StakingAdmi` to execute privileged collator selection operations. +/// We allow Root and the `StakingAdmin` to execute privileged collator selection operations. pub type CollatorSelectionUpdateOrigin = EitherOfDiverse< EnsureRoot, EnsureXcm>, @@ -456,7 +456,7 @@ mod benches { [cumulus_pallet_xcmp_queue, XcmpQueue] [pallet_collator_selection, CollatorSelection] // XCM - [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] ); @@ -644,7 +644,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -680,7 +680,7 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< xcm_config::XcmConfig, diff --git a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs index 639bfd95834..7b8e40e0428 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs @@ -145,7 +145,7 @@ pub type ForeignFungiblesTransactor = FungiblesAdapter< LocationToAccountId, // Our chain's account ID type (we can't get away without mentioning it explicitly): AccountId, - // We dont need to check teleports here. + // We don't need to check teleports here. NoChecking, // The account to use for tracking teleports. CheckingAccount, diff --git a/cumulus/polkadot-parachain/src/command.rs b/cumulus/polkadot-parachain/src/command.rs index 9ba7b7876b3..ac9c6b6f978 100644 --- a/cumulus/polkadot-parachain/src/command.rs +++ b/cumulus/polkadot-parachain/src/command.rs @@ -135,7 +135,7 @@ fn runtime(id: &str) -> Runtime { fn load_spec(id: &str) -> std::result::Result, String> { let (id, _, para_id) = extract_parachain_id(id); Ok(match id { - // - Defaul-like + // - Default-like "staging" => Box::new(chain_spec::rococo_parachain::staging_rococo_parachain_local_config()), "tick" => Box::new(GenericChainSpec::from_json_bytes( diff --git a/cumulus/primitives/storage-weight-reclaim/src/lib.rs b/cumulus/primitives/storage-weight-reclaim/src/lib.rs index 5dddc92e395..c09c12d7a0a 100644 --- a/cumulus/primitives/storage-weight-reclaim/src/lib.rs +++ b/cumulus/primitives/storage-weight-reclaim/src/lib.rs @@ -421,7 +421,7 @@ mod tests { .unwrap(); assert_eq!(pre, Some(100)); - // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` + // The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo` // we always need to call `post_dispatch` to verify that they interoperate correctly. 
assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); assert_ok!(StorageWeightReclaim::::post_dispatch( @@ -456,7 +456,7 @@ mod tests { .unwrap(); assert_eq!(pre, Some(100)); - // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` + // The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo` // we always need to call `post_dispatch` to verify that they interoperate correctly. assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); assert_ok!(StorageWeightReclaim::::post_dispatch( @@ -500,7 +500,7 @@ mod tests { &Ok(()) )); // `CheckWeight` gets called after `StorageWeightReclaim` this time. - // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` + // The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo` // we always need to call `post_dispatch` to verify that they interoperate correctly. assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); @@ -536,7 +536,7 @@ mod tests { &Ok(()) )); // `CheckWeight` gets called after `StorageWeightReclaim` this time. - // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` + // The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo` // we always need to call `post_dispatch` to verify that they interoperate correctly. assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); diff --git a/cumulus/primitives/timestamp/src/lib.rs b/cumulus/primitives/timestamp/src/lib.rs index 535c4a2a726..e6aba6d0bb7 100644 --- a/cumulus/primitives/timestamp/src/lib.rs +++ b/cumulus/primitives/timestamp/src/lib.rs @@ -22,7 +22,7 @@ //! access to any clock from the runtime the timestamp is always passed as an inherent into the //! runtime. To check this inherent when validating the block, we will use the relay chain slot. As //! the relay chain slot is derived from a timestamp, we can easily convert it back to a timestamp -//! by muliplying it with the slot duration. By comparing the relay chain slot derived timestamp +//! by multiplying it with the slot duration. By comparing the relay chain slot derived timestamp //! with the timestamp we can ensure that the parachain timestamp is reasonable. 
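The doc comment above describes the whole check: a relay-chain slot is itself derived from a timestamp, so multiplying the slot by the slot duration recovers a timestamp to compare against. A hedged, self-contained sketch of that comparison (the names and the 6-second slot duration are illustrative assumptions, not this crate's API):

```rust
/// Assumed relay-chain slot duration in milliseconds (6s on Polkadot-like chains).
const RELAY_CHAIN_SLOT_DURATION_MILLIS: u64 = 6_000;

/// A slot is `unix_millis / slot_duration`, so multiplying by the slot duration
/// recovers (a lower bound of) the timestamp the slot was derived from.
fn timestamp_from_relay_slot(relay_slot: u64) -> u64 {
    relay_slot * RELAY_CHAIN_SLOT_DURATION_MILLIS
}

/// The parachain timestamp is "reasonable" if it is close enough to the
/// timestamp derived from the relay-chain slot.
fn timestamp_is_reasonable(parachain_millis: u64, relay_slot: u64, tolerance_millis: u64) -> bool {
    parachain_millis.abs_diff(timestamp_from_relay_slot(relay_slot)) <= tolerance_millis
}
```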
#![cfg_attr(not(feature = "std"), no_std)] diff --git a/cumulus/primitives/utility/src/lib.rs b/cumulus/primitives/utility/src/lib.rs index abc391bdcb8..d5d411356dc 100644 --- a/cumulus/primitives/utility/src/lib.rs +++ b/cumulus/primitives/utility/src/lib.rs @@ -141,7 +141,7 @@ impl< ) -> Result { log::trace!(target: "xcm::weight", "TakeFirstAssetTrader::buy_weight weight: {:?}, payment: {:?}, context: {:?}", weight, payment, context); - // Make sure we dont enter twice + // Make sure we don't enter twice if self.0.is_some() { return Err(XcmError::NotWithdrawable) } @@ -176,7 +176,7 @@ impl< // Convert to the same kind of asset, with the required fungible balance let required = first.id.clone().into_asset(asset_balance.into()); - // Substract payment + // Subtract payment let unused = payment.checked_sub(required.clone()).map_err(|_| XcmError::TooExpensive)?; // record weight and asset @@ -203,7 +203,7 @@ impl< // Calculate asset_balance // This read should have already be cached in buy_weight - let (asset_balance, outstanding_minus_substracted) = + let (asset_balance, outstanding_minus_subtracted) = FeeCharger::charge_weight_in_fungibles(local_asset_id, weight).ok().map( |asset_balance| { // Require at least a drop of minimum_balance @@ -221,16 +221,15 @@ impl< )?; // Convert balances into u128 - let outstanding_minus_substracted: u128 = - outstanding_minus_substracted.saturated_into(); + let outstanding_minus_subtracted: u128 = outstanding_minus_subtracted.saturated_into(); let asset_balance: u128 = asset_balance.saturated_into(); - // Construct outstanding_concrete_asset with the same location id and substracted + // Construct outstanding_concrete_asset with the same location id and subtracted // balance let outstanding_concrete_asset: Asset = - (id.clone(), outstanding_minus_substracted).into(); + (id.clone(), outstanding_minus_subtracted).into(); - // Substract from existing weight and balance + // Subtract from existing weight and balance weight_outstanding = weight_outstanding.saturating_sub(weight); // Override AssetTraderRefunder @@ -263,9 +262,10 @@ impl< } } -/// XCM fee depositor to which we implement the TakeRevenue trait -/// It receives a Transact implemented argument, a 32 byte convertible acocuntId, and the fee -/// receiver account FungiblesMutateAdapter should be identical to that implemented by WithdrawAsset +/// XCM fee depositor to which we implement the `TakeRevenue` trait. +/// It receives a `Transact` implemented argument and a 32 byte convertible `AccountId`, and the fee +/// receiver account's `FungiblesMutateAdapter` should be identical to that implemented by +/// `WithdrawAsset`. pub struct XcmFeesTo32ByteAccount( PhantomData<(FungiblesMutateAdapter, AccountId, ReceiverAccount)>, ); @@ -763,7 +763,8 @@ mod test_trader { /// Implementation of `xcm_builder::EnsureDelivery` which helps to ensure delivery to the /// parent relay chain. Deposits existential deposit for origin (if needed). /// Deposits estimated fee to the origin account (if needed). -/// Allows to trigger additional logic for specific `ParaId` (e.g. open HRMP channel) (if neeeded). +/// Allows triggering of additional logic for a specific `ParaId` (e.g. to open an HRMP channel) if +/// needed. 
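For context on the `Subtract payment` and `outstanding_minus_subtracted` fixes above: `buy_weight` converts the requested weight into a required asset balance and subtracts it from the payment, failing if the payment cannot cover it. A simplified stand-alone model of that accounting (all names and the linear fee below are assumptions, not the crate's types):

```rust
/// Stand-in for the payment held by the trader (upstream this is the
/// holding of assets passed to `buy_weight`).
struct Payment(u128);

impl Payment {
    /// Subtract the required fee, failing (upstream: `XcmError::TooExpensive`)
    /// if the payment cannot cover it; the remainder is the unused payment.
    fn checked_sub(self, required: u128) -> Result<Payment, &'static str> {
        self.0.checked_sub(required).map(Payment).ok_or("TooExpensive")
    }
}

/// Assumed linear weight-to-fee conversion (upstream this is the
/// `charge_weight_in_fungibles` fee charger).
fn fee_for_weight(weight: u64) -> u128 {
    u128::from(weight) / 1_000
}

/// Core of `buy_weight`: compute the required fee and subtract it,
/// returning the unused remainder of the payment.
fn buy_weight(payment: Payment, weight: u64) -> Result<Payment, &'static str> {
    payment.checked_sub(fee_for_weight(weight))
}
```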
#[cfg(feature = "runtime-benchmarks")] pub struct ToParentDeliveryHelper( sp_std::marker::PhantomData<(XcmConfig, ExistentialDeposit, PriceForDelivery)>, diff --git a/cumulus/scripts/scale_encode_genesis/index.js b/cumulus/scripts/scale_encode_genesis/index.js index f612e6da79d..c6600e40636 100644 --- a/cumulus/scripts/scale_encode_genesis/index.js +++ b/cumulus/scripts/scale_encode_genesis/index.js @@ -19,14 +19,14 @@ async function connect(endpoint, types = {}) { } if (!process.argv[2] || !process.argv[3]) { - console.log("usage: node generate_keys [rpc enpoint]"); + console.log("usage: node generate_keys [rpc endpoint]"); exit(); } const input = process.argv[2]; const output = process.argv[3]; // default to localhost and the default Substrate port -const rpcEnpoint = process.argv[4] || "ws://localhost:9944"; +const rpcEndpoint = process.argv[4] || "ws://localhost:9944"; console.log("Processing", input, output); fs.readFile(input, "utf8", (err, data) => { @@ -38,8 +38,8 @@ fs.readFile(input, "utf8", (err, data) => { const genesis = JSON.parse(data); console.log("loaded genesis, length = ", genesis.length); - console.log(`Connecting to RPC endpoint: ${rpcEnpoint}`); - connect(rpcEnpoint) + console.log(`Connecting to RPC endpoint: ${rpcEndpoint}`); + connect(rpcEndpoint) .then((api) => { console.log('Connected'); const setStorage = api.tx.system.setStorage(genesis); diff --git a/cumulus/scripts/temp_parachain_types.json b/cumulus/scripts/temp_parachain_types.json index f550a677445..2509d32be9f 100644 --- a/cumulus/scripts/temp_parachain_types.json +++ b/cumulus/scripts/temp_parachain_types.json @@ -54,7 +54,7 @@ "validity_votes": "Vec", "validator_indices": "BitVec" }, - "CandidatePendingAvailablility": { + "CandidatePendingAvailability": { "core": "u32", "descriptor": "CandidateDescriptor", "availability_votes": "BitVec", diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 8e3569b02a1..5127b63f271 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -139,7 +139,7 @@ pub fn native_version() -> NativeVersion { NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } } -/// We assume that ~10% of the block weight is consumed by `on_initalize` handlers. +/// We assume that ~10% of the block weight is consumed by `on_initialize` handlers. /// This is used to limit the maximal weight of a single extrinsic. const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); /// We allow `Normal` extrinsics to fill up the block up to 75%, the rest can be used diff --git a/cumulus/test/service/benches/validate_block.rs b/cumulus/test/service/benches/validate_block.rs index a614863803e..d95f733969b 100644 --- a/cumulus/test/service/benches/validate_block.rs +++ b/cumulus/test/service/benches/validate_block.rs @@ -47,7 +47,7 @@ fn create_extrinsics( src_accounts: &[sr25519::Pair], dst_accounts: &[sr25519::Pair], ) -> (usize, Vec) { - // Add as many tranfer extrinsics as possible into a single block. + // Add as many transfer extrinsics as possible into a single block. 
let mut block_builder = BlockBuilderBuilder::new(client) .on_parent_block(client.chain_info().best_hash) .with_parent_block_number(client.chain_info().best_number) diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 3554a383f21..3af3901d175 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -735,7 +735,7 @@ pub fn node_config( tokio_handle: tokio::runtime::Handle, key: Sr25519Keyring, nodes: Vec, - nodes_exlusive: bool, + nodes_exclusive: bool, para_id: ParaId, is_collator: bool, endowed_accounts: Vec, @@ -759,7 +759,7 @@ pub fn node_config( None, ); - if nodes_exlusive { + if nodes_exclusive { network_config.default_peers_set.reserved_nodes = nodes; network_config.default_peers_set.non_reserved_mode = sc_network::config::NonReservedPeerMode::Deny; diff --git a/docker/dockerfiles/binary_injected.Dockerfile b/docker/dockerfiles/binary_injected.Dockerfile index ac1fd5317c6..c8930bd83f0 100644 --- a/docker/dockerfiles/binary_injected.Dockerfile +++ b/docker/dockerfiles/binary_injected.Dockerfile @@ -2,7 +2,7 @@ FROM docker.io/parity/base-bin # This file allows building a Generic container image # based on one or multiple pre-built Linux binaries. -# Some defaults are set to polkadot but all can be overriden. +# Some defaults are set to polkadot but all can be overridden. SHELL ["/bin/bash", "-c"] diff --git a/docker/scripts/build-injected.sh b/docker/scripts/build-injected.sh index f415cf43c0e..749d0fa335c 100755 --- a/docker/scripts/build-injected.sh +++ b/docker/scripts/build-injected.sh @@ -20,7 +20,7 @@ PROJECT_ROOT=${PROJECT_ROOT:-$(git rev-parse --show-toplevel)} DOCKERFILE=${DOCKERFILE:-docker/dockerfiles/binary_injected.Dockerfile} VERSION_TOML=$(grep "^version " $PROJECT_ROOT/Cargo.toml | grep -oE "([0-9\.]+-?[0-9]+)") -#n The following VAR have default that can be overriden +# The following variables have defaults that can be overridden DOCKER_OWNER=${DOCKER_OWNER:-parity} # We may get 1..n binaries, comma separated diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml index 3f84d45640f..434202ed693 100644 --- a/docs/sdk/Cargo.toml +++ b/docs/sdk/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-sdk-docs" -description = "The one stop shop for developers of the polakdot-sdk" +description = "The one stop shop for developers of the polkadot-sdk" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "paritytech.github.io" repository.workspace = true diff --git a/docs/sdk/src/reference_docs/frame_runtime_upgrades_and_migrations.rs b/docs/sdk/src/reference_docs/frame_runtime_upgrades_and_migrations.rs index 7d870b43221..cbbf611f9dc 100644 --- a/docs/sdk/src/reference_docs/frame_runtime_upgrades_and_migrations.rs +++ b/docs/sdk/src/reference_docs/frame_runtime_upgrades_and_migrations.rs @@ -16,7 +16,7 @@ //! in a process called "Runtime Upgrades". //! //! Forkless runtime upgrades are a defining feature of the Substrate framework. Updating the -//! runtime logic without forking the code base enables your blockchain to seemlessly evolve +//! runtime logic without forking the code base enables your blockchain to seamlessly evolve //! over time in a deterministic, rules-based manner. It also removes ambiguity for node operators //! and other participants in the network about what is the canonical runtime. //! @@ -24,7 +24,7 @@ //! //! ## Performing a Runtime Upgrade //! -//! To upgrade a runtime, an [`Origin`](frame_system::RawOrigin) with the necesarry permissions +//!
To upgrade a runtime, an [`Origin`](frame_system::RawOrigin) with the necessary permissions //! (usually via governance) changes the `:code` storage. Usually, this is performed via a call to //! [`set_code`] (or [`set_code_without_checks`]) with the desired new runtime blob, scheduled //! using [`pallet_scheduler`]. @@ -41,7 +41,7 @@ //! //! The typical use case of a migration is to 'migrate' pallet storage from one layout to another, //! for example when the encoding of a storage item is changed. However, they can also execute -//! arbitary logic such as: +//! arbitrary logic such as: //! //! - Calling arbitrary pallet methods //! - Mutating arbitrary on-chain state //! @@ -88,7 +88,7 @@ //! //! Prior to deploying migrations, it is critical to perform additional checks to ensure that when //! run in our real runtime they will not brick the chain due to: -//! - Panicing +//! - Panicking //! - Touching too many storage keys and resulting in an excessively large PoV //! - Taking too long to execute //! diff --git a/polkadot/cli/src/cli.rs b/polkadot/cli/src/cli.rs index e1bc2309a94..74e19044469 100644 --- a/polkadot/cli/src/cli.rs +++ b/polkadot/cli/src/cli.rs @@ -122,7 +122,7 @@ pub struct RunCmd { /// Overseer message capacity override. /// - /// **Dangerous!** Do not touch unless explicitly adviced to. + /// **Dangerous!** Do not touch unless explicitly advised to. #[arg(long)] pub overseer_channel_capacity_override: Option, diff --git a/polkadot/grafana/README.md b/polkadot/grafana/README.md index 7350001bfa1..e909fdd29a7 100644 --- a/polkadot/grafana/README.md +++ b/polkadot/grafana/README.md @@ -8,7 +8,7 @@ monitor the liveliness and performance of a network and its validators. # How does it work ? Just import the dashboard JSON files from this folder in your Grafana installation. All dashboards are grouped in -folder percategory (like for example `parachains`). The files have been created by Grafana export functionality and +folders per category (for example `parachains`). The files have been created by Grafana export functionality and follow the data model specified [here](https://grafana.com/docs/grafana/latest/dashboards/json-model/). We aim to keep the dashboards here in sync with the implementation, except dashboards for development and diff --git a/polkadot/node/core/approval-voting/src/approval_db/v1/tests.rs b/polkadot/node/core/approval-voting/src/approval_db/v1/tests.rs index b979cb7ef45..b0966ad01f7 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v1/tests.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v1/tests.rs @@ -254,7 +254,7 @@ fn canonicalize_works() { // -> B1 -> C1 -> D1 // A -> B2 -> C2 -> D2 // - // We'll canonicalize C1. Everytning except D1 should disappear. + // We'll canonicalize C1. Everything except D1 should disappear. // // Candidates: // Cand1 in B2 diff --git a/polkadot/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs b/polkadot/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs index df6e4754dbd..1081d79884f 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs @@ -79,7 +79,7 @@ pub fn v1_to_latest(db: Arc, config: Config) -> Result<()> { block.candidates().iter().enumerate() { // Loading the candidate will also perform the conversion to the updated format and - // return that represantation. + // return that representation.
if let Some(candidate_entry) = backend .load_candidate_entry_v1(&candidate_hash, candidate_index as CandidateIndex) .map_err(|e| Error::InternalError(e))? diff --git a/polkadot/node/core/approval-voting/src/approval_db/v2/tests.rs b/polkadot/node/core/approval-voting/src/approval_db/v2/tests.rs index 6021b44c276..5fa915add41 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v2/tests.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v2/tests.rs @@ -269,7 +269,7 @@ fn canonicalize_works() { // -> B1 -> C1 -> D1 // A -> B2 -> C2 -> D2 // - // We'll canonicalize C1. Everytning except D1 should disappear. + // We'll canonicalize C1. Everything except D1 should disappear. // // Candidates: // Cand1 in B2 diff --git a/polkadot/node/core/approval-voting/src/approval_db/v3/migration_helpers.rs b/polkadot/node/core/approval-voting/src/approval_db/v3/migration_helpers.rs index ad5e89ef3de..d1e7ee08225 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v3/migration_helpers.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v3/migration_helpers.rs @@ -60,7 +60,7 @@ pub fn v2_to_latest(db: Arc, config: Config) -> Result<()> { block.candidates().iter().enumerate() { // Loading the candidate will also perform the conversion to the updated format and - // return that represantation. + // return that representation. if let Some(candidate_entry) = backend .load_candidate_entry_v2(&candidate_hash, candidate_index as CandidateIndex) .map_err(|e| Error::InternalError(e))? @@ -104,7 +104,7 @@ pub fn v1_to_latest_sanity_check( for block in all_blocks { for (_core_index, candidate_hash) in block.candidates() { // Loading the candidate will also perform the conversion to the updated format and - // return that represantation. + // return that representation. if let Some(candidate_entry) = backend.load_candidate_entry(&candidate_hash).unwrap() { candidates.insert(candidate_entry.candidate.hash()); } diff --git a/polkadot/node/core/approval-voting/src/approval_db/v3/tests.rs b/polkadot/node/core/approval-voting/src/approval_db/v3/tests.rs index 08c65461bca..7c0cf9d4f7d 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v3/tests.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v3/tests.rs @@ -264,7 +264,7 @@ fn canonicalize_works() { // -> B1 -> C1 -> D1 // A -> B2 -> C2 -> D2 // - // We'll canonicalize C1. Everytning except D1 should disappear. + // We'll canonicalize C1. Everything except D1 should disappear. // // Candidates: // Cand1 in B2 diff --git a/polkadot/node/core/approval-voting/src/criteria.rs b/polkadot/node/core/approval-voting/src/criteria.rs index 1ebea2641b6..57c0ac272dc 100644 --- a/polkadot/node/core/approval-voting/src/criteria.rs +++ b/polkadot/node/core/approval-voting/src/criteria.rs @@ -148,7 +148,7 @@ fn relay_vrf_modulo_cores( generate_samples(rand_chacha, num_samples as usize, max_cores as usize) } -/// Generates `num_sumples` randomly from (0..max_cores) range +/// Generates `num_samples` randomly from (0..max_cores) range /// /// Note! The algorithm can't change because validators on the other /// side won't be able to check the assignments until they update. 
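[Editorial aside on the `generate_samples` doc fix above: the function draws `num_samples` values from the `(0..max_cores)` range with a seeded ChaCha RNG so that every validator reproduces the same result. A minimal sketch of that shape, assuming the `rand` and `rand_chacha` crates; the names and the fixed seed are illustrative only, not the actual `relay_vrf_modulo_cores` code, which derives its randomness from a VRF.]

use rand::{Rng, SeedableRng};
use rand_chacha::ChaCha20Rng;

// Draw `num_samples` core indices from (0..max_cores); with the same seed,
// every caller gets the same sequence.
fn generate_samples(rng: &mut ChaCha20Rng, num_samples: usize, max_cores: usize) -> Vec<u32> {
    (0..num_samples).map(|_| rng.gen_range(0..max_cores) as u32).collect()
}

fn main() {
    let mut rng = ChaCha20Rng::from_seed([0u8; 32]);
    println!("{:?}", generate_samples(&mut rng, 3, 100));
}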
diff --git a/polkadot/node/core/approval-voting/src/import.rs b/polkadot/node/core/approval-voting/src/import.rs index 7a56e9fd112..d34191fba31 100644 --- a/polkadot/node/core/approval-voting/src/import.rs +++ b/polkadot/node/core/approval-voting/src/import.rs @@ -91,7 +91,7 @@ enum ImportedBlockInfoError { #[error(transparent)] RuntimeError(RuntimeApiError), - #[error("future cancalled while requesting {0}")] + #[error("future cancelled while requesting {0}")] FutureCancelled(&'static str, futures::channel::oneshot::Canceled), #[error(transparent)] diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs index 1d7ab3eee21..1a62c9ee55e 100644 --- a/polkadot/node/core/approval-voting/src/lib.rs +++ b/polkadot/node/core/approval-voting/src/lib.rs @@ -1850,7 +1850,7 @@ async fn get_approval_signatures_for_candidate( gum::trace!( target: LOG_TARGET, ?candidate_hash, - "Spawning task for fetching sinatures from approval-distribution" + "Spawning task for fetching signatures from approval-distribution" ); ctx.spawn("get-approval-signatures", Box::pin(get_approvals)) } diff --git a/polkadot/node/core/approval-voting/src/tests.rs b/polkadot/node/core/approval-voting/src/tests.rs index c9053232a4c..a3013eab46d 100644 --- a/polkadot/node/core/approval-voting/src/tests.rs +++ b/polkadot/node/core/approval-voting/src/tests.rs @@ -3154,7 +3154,7 @@ where // starting configuration. The relevant ticks (all scheduled wakeups) are printed after no further // ticks are scheduled. To create a valid test, a prefix of the relevant ticks should be included // in the final test configuration, ending at the tick with the desired inputs to -// should_trigger_assignemnt. +// should_trigger_assignment. async fn step_until_done(clock: &MockClock) { let mut relevant_ticks = Vec::new(); loop { @@ -3837,7 +3837,7 @@ fn test_approval_is_sent_on_max_approval_coalesce_count() { async fn handle_approval_on_max_coalesce_count( virtual_overseer: &mut VirtualOverseer, - candidate_indicies: Vec, + candidate_indices: Vec, ) { assert_matches!( overseer_recv(virtual_overseer).await, @@ -3845,16 +3845,16 @@ async fn handle_approval_on_max_coalesce_count( _, c_indices, )) => { - assert_eq!(TryInto::::try_into(candidate_indicies.clone()).unwrap(), c_indices); + assert_eq!(TryInto::::try_into(candidate_indices.clone()).unwrap(), c_indices); } ); - for _ in &candidate_indicies { + for _ in &candidate_indices { recover_available_data(virtual_overseer).await; fetch_validation_code(virtual_overseer).await; } - for _ in &candidate_indicies { + for _ in &candidate_indices { assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive{exec_kind, response_sender, ..}) if exec_kind == PvfExecKind::Approval => { @@ -3885,7 +3885,7 @@ async fn handle_approval_on_max_coalesce_count( assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(vote)) => { - assert_eq!(TryInto::::try_into(candidate_indicies).unwrap(), vote.candidate_indices); + assert_eq!(TryInto::::try_into(candidate_indices).unwrap(), vote.candidate_indices); } ); @@ -3895,7 +3895,7 @@ async fn handle_approval_on_max_coalesce_count( async fn handle_approval_on_max_wait_time( virtual_overseer: &mut VirtualOverseer, - candidate_indicies: Vec, + candidate_indices: Vec, clock: Box, ) { const TICK_NOW_BEGIN: u64 = 1; @@ -3909,16 +3909,16 @@ async fn handle_approval_on_max_wait_time( 
_, c_indices, )) => { - assert_eq!(TryInto::::try_into(candidate_indicies.clone()).unwrap(), c_indices); + assert_eq!(TryInto::::try_into(candidate_indices.clone()).unwrap(), c_indices); } ); - for _ in &candidate_indicies { + for _ in &candidate_indices { recover_available_data(virtual_overseer).await; fetch_validation_code(virtual_overseer).await; } - for _ in &candidate_indicies { + for _ in &candidate_indices { assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive{exec_kind, response_sender, ..}) if exec_kind == PvfExecKind::Approval => { @@ -3978,7 +3978,7 @@ async fn handle_approval_on_max_wait_time( assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(vote)) => { - assert_eq!(TryInto::::try_into(candidate_indicies).unwrap(), vote.candidate_indices); + assert_eq!(TryInto::::try_into(candidate_indices).unwrap(), vote.candidate_indices); } ); @@ -4319,7 +4319,7 @@ fn subsystem_relaunches_approval_work_on_restart() { virtual_overseer }); - // Restart a new approval voting subsystem with the same database and major syncing true untill + // Restart a new approval voting subsystem with the same database and major syncing true until // the last leaf. let config = HarnessConfigBuilder::default().backend(store_clone).major_syncing(true).build(); diff --git a/polkadot/node/core/approval-voting/src/time.rs b/polkadot/node/core/approval-voting/src/time.rs index 99dfbe07678..5c3e7e85a17 100644 --- a/polkadot/node/core/approval-voting/src/time.rs +++ b/polkadot/node/core/approval-voting/src/time.rs @@ -126,13 +126,13 @@ impl DelayedApprovalTimer { /// no additional timer is started. pub(crate) fn maybe_arm_timer( &mut self, - wait_untill: Tick, + wait_until: Tick, clock: &dyn Clock, block_hash: Hash, validator_index: ValidatorIndex, ) { if self.blocks.insert(block_hash) { - let clock_wait = clock.wait(wait_untill); + let clock_wait = clock.wait(wait_until); self.timers.push(Box::pin(async move { clock_wait.await; (block_hash, validator_index) diff --git a/polkadot/node/core/av-store/src/lib.rs b/polkadot/node/core/av-store/src/lib.rs index ef7dcecac07..68db4686a97 100644 --- a/polkadot/node/core/av-store/src/lib.rs +++ b/polkadot/node/core/av-store/src/lib.rs @@ -1218,7 +1218,7 @@ fn process_message( // tx channel is dropped and that error is caught by the caller subsystem. // // We bubble up the specific error here so `av-store` logs still tell what - // happend. + // happened. return Err(e.into()) }, } diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 532ae2bd7cb..4b6beb5592e 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -292,7 +292,7 @@ struct State { /// Cache the per-session Validator->Group mapping. validator_to_group_cache: LruMap>>>, - /// A cloneable sender which is dispatched to background candidate validation tasks to inform + /// A clonable sender which is dispatched to background candidate validation tasks to inform /// the main task of the result. background_validation_tx: mpsc::Sender<(Hash, ValidatedCandidateCommand)>, /// The handle to the keystore used for signing. 
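[Editorial aside on the `background_validation_tx` doc fix above: that field is one half of a common subsystem pattern, where each background validation task receives its own clone of an `mpsc` sender and the main task drains the single receiver. A minimal sketch assuming only the `futures` crate; the types and task bodies are illustrative, not the backing subsystem's actual code.]

use futures::{channel::mpsc, executor::block_on, SinkExt, StreamExt};
use std::thread;

fn main() {
    let (tx, mut rx) = mpsc::channel::<u32>(16);

    let handles: Vec<_> = (0..4u32)
        .map(|id| {
            // Every background task owns its own clone of the sender.
            let mut tx = tx.clone();
            thread::spawn(move || block_on(tx.send(id)).expect("receiver alive"))
        })
        .collect();
    // Drop the original sender so the stream terminates once all clones are done.
    drop(tx);

    block_on(async {
        while let Some(result) = rx.next().await {
            println!("background task reported: {result}");
        }
    });
    for handle in handles {
        handle.join().expect("task panicked");
    }
}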
diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index 8237137fdca..ec24434db24 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -617,7 +617,7 @@ async fn validate_candidate_exhaustive( Err(e) => { gum::info!(target: LOG_TARGET, ?para_id, err=?e, "Invalid candidate (validation code)"); - // Code already passed pre-checking, if decompression fails now this most likley means + // Code already passed pre-checking, if decompression fails now this most likely means // some local corruption happened. return Err(ValidationFailed("Code decompression failed".to_string())) }, diff --git a/polkadot/node/core/chain-selection/src/lib.rs b/polkadot/node/core/chain-selection/src/lib.rs index aa5bb9548ad..6f864fefb61 100644 --- a/polkadot/node/core/chain-selection/src/lib.rs +++ b/polkadot/node/core/chain-selection/src/lib.rs @@ -51,7 +51,7 @@ type Timestamp = u64; // If a block isn't approved in 120 seconds, nodes will abandon it // and begin building on another chain. const STAGNANT_TIMEOUT: Timestamp = 120; -// Delay prunning of the stagnant keys in prune only mode by 25 hours to avoid interception with the +// Delay pruning of the stagnant keys in prune only mode by 25 hours to avoid interception with the // finality const STAGNANT_PRUNE_DELAY: Timestamp = 25 * 60 * 60; // Maximum number of stagnant entries cleaned during one `STAGNANT_TIMEOUT` iteration @@ -237,7 +237,7 @@ impl Clock for SystemClock { // // The exact time that a block becomes stagnant in the local node is always expected // to differ from other nodes due to network asynchrony and delays in block propagation. - // Non-monotonicity exarcerbates that somewhat, but not meaningfully. + // Non-monotonicity exacerbates that somewhat, but not meaningfully. match SystemTime::now().duration_since(UNIX_EPOCH) { Ok(d) => d.as_secs(), diff --git a/polkadot/node/core/chain-selection/src/tests.rs b/polkadot/node/core/chain-selection/src/tests.rs index cf021c0efeb..bc998f268a0 100644 --- a/polkadot/node/core/chain-selection/src/tests.rs +++ b/polkadot/node/core/chain-selection/src/tests.rs @@ -406,7 +406,7 @@ async fn import_chains_into_empty( // some pre-blocks may need to be supplied to answer ancestry requests // that gather batches beyond the beginning of the new chain. // pre-blocks are those already known by the subsystem, however, -// the subsystem has no way of knowin that until requesting ancestry. +// the subsystem has no way of knowing that until requesting ancestry. async fn import_all_blocks_into( virtual_overseer: &mut VirtualOverseer, backend: &TestBackend, @@ -1300,7 +1300,7 @@ fn finalize_erases_unviable_from_one_but_not_all_reverts() { // F <- A1 <- A2 <- A3 // // A3 reverts A2 and A1. - // Finalize A1. A2 is stil unviable. + // Finalize A1. A2 is still unviable. 
let (a3_hash, chain_a) = construct_chain_on_base(vec![1, 2, 3], finalized_number, finalized_hash, |h| { diff --git a/polkadot/node/core/dispute-coordinator/src/db/v1.rs b/polkadot/node/core/dispute-coordinator/src/db/v1.rs index f0f17d2325d..4950765cf51 100644 --- a/polkadot/node/core/dispute-coordinator/src/db/v1.rs +++ b/polkadot/node/core/dispute-coordinator/src/db/v1.rs @@ -341,7 +341,7 @@ pub(crate) fn note_earliest_session( let lower_bound = (new_earliest_session, CandidateHash(Hash::repeat_byte(0x00))); let new_recent_disputes = recent_disputes.split_off(&lower_bound); - // Any remanining disputes are considered ancient and must be pruned. + // Any remaining disputes are considered ancient and must be pruned. let pruned_disputes = recent_disputes; if pruned_disputes.len() != 0 { diff --git a/polkadot/node/core/dispute-coordinator/src/lib.rs b/polkadot/node/core/dispute-coordinator/src/lib.rs index 4b511e7430a..daa384b36ff 100644 --- a/polkadot/node/core/dispute-coordinator/src/lib.rs +++ b/polkadot/node/core/dispute-coordinator/src/lib.rs @@ -462,7 +462,7 @@ async fn wait_for_first_leaf(ctx: &mut Context) -> Result { // Rare case where same candidate was present on multiple heights, but all are // pruned at the same time. This candidate was already pruned in the previous - // occurence so it is skipped now. + // occurrence so it is skipped now. }, Entry::Occupied(mut e) => { let mut blocks_including = std::mem::take(e.get_mut()); diff --git a/polkadot/node/core/dispute-coordinator/src/scraping/tests.rs b/polkadot/node/core/dispute-coordinator/src/scraping/tests.rs index 748f9a16f49..726dda596d7 100644 --- a/polkadot/node/core/dispute-coordinator/src/scraping/tests.rs +++ b/polkadot/node/core/dispute-coordinator/src/scraping/tests.rs @@ -542,8 +542,8 @@ fn scraper_handles_backed_but_not_included_candidate() { } #[test] -fn scraper_handles_the_same_candidate_incuded_in_two_different_block_heights() { - // Same candidate will be inclued in these two leaves +fn scraper_handles_the_same_candidate_included_in_two_different_block_heights() { + // Same candidate will be included in these two leaves let test_targets = vec![2, 3]; // How many blocks should we skip before sending a leaf update. diff --git a/polkadot/node/core/dispute-coordinator/src/tests.rs b/polkadot/node/core/dispute-coordinator/src/tests.rs index 0360e357bee..7c1f4ff241d 100644 --- a/polkadot/node/core/dispute-coordinator/src/tests.rs +++ b/polkadot/node/core/dispute-coordinator/src/tests.rs @@ -2226,7 +2226,7 @@ fn resume_dispute_without_local_statement() { test_state }) }) - // Alice should send a DisputeParticiationMessage::Participate on restart since she has no + // Alice should send a DisputeParticipationMessage::Participate on restart since she has no // local statement for the active dispute. .resume(|mut test_state, mut virtual_overseer| { Box::pin(async move { @@ -2390,7 +2390,7 @@ fn resume_dispute_with_local_statement() { test_state }) }) - // Alice should not send a DisputeParticiationMessage::Participate on restart since she has a + // Alice should not send a DisputeParticipationMessage::Participate on restart since she has a // local statement for the active dispute, instead she should try to (re-)send her vote. 
.resume(|mut test_state, mut virtual_overseer| { let candidate_receipt = make_valid_candidate_receipt(); @@ -2495,7 +2495,7 @@ fn resume_dispute_without_local_statement_or_local_key() { test_state }) }) - // Two should not send a DisputeParticiationMessage::Participate on restart since she is no + // Two should not send a DisputeParticipationMessage::Participate on restart since she is no // validator in that dispute. .resume(|mut test_state, mut virtual_overseer| { Box::pin(async move { diff --git a/polkadot/node/core/provisioner/src/disputes/prioritized_selection/mod.rs b/polkadot/node/core/provisioner/src/disputes/prioritized_selection/mod.rs index cb55ce39bc8..d7a5a811336 100644 --- a/polkadot/node/core/provisioner/src/disputes/prioritized_selection/mod.rs +++ b/polkadot/node/core/provisioner/src/disputes/prioritized_selection/mod.rs @@ -52,7 +52,7 @@ pub const MAX_DISPUTE_VOTES_FORWARDED_TO_RUNTIME: usize = 200; /// `dispute-coordinator`. /// /// This value should be less than `MAX_DISPUTE_VOTES_FORWARDED_TO_RUNTIME`. Increase it in case -/// `provisioner` sends too many `QueryCandidateVotes` messages to `dispite-coordinator`. +/// `provisioner` sends too many `QueryCandidateVotes` messages to `dispute-coordinator`. #[cfg(not(test))] const VOTES_SELECTION_BATCH_SIZE: usize = 1_100; #[cfg(test)] diff --git a/polkadot/node/core/pvf-checker/src/tests.rs b/polkadot/node/core/pvf-checker/src/tests.rs index b0401ecdc3b..b2365fe53e5 100644 --- a/polkadot/node/core/pvf-checker/src/tests.rs +++ b/polkadot/node/core/pvf-checker/src/tests.rs @@ -39,8 +39,8 @@ use std::{collections::HashMap, sync::Arc, time::Duration}; type VirtualOverseer = TestSubsystemContextHandle; -fn dummy_validation_code_hash(descriminator: u8) -> ValidationCodeHash { - ValidationCode(vec![descriminator]).hash() +fn dummy_validation_code_hash(discriminator: u8) -> ValidationCodeHash { + ValidationCode(vec![discriminator]).hash() } struct StartsNewSession { @@ -511,7 +511,7 @@ fn reactivating_pvf_leads_to_second_check() { .reply(PreCheckOutcome::Valid); test_state.expect_submit_vote(&mut handle).await.reply_ok(); - // Now activate a descdedant leaf, where the PVF is not present. + // Now activate a descendant leaf, where the PVF is not present. test_state .active_leaves_update( &mut handle, diff --git a/polkadot/node/core/pvf/common/src/pvf.rs b/polkadot/node/core/pvf/common/src/pvf.rs index 3f5b4d7ca70..340dffe07c3 100644 --- a/polkadot/node/core/pvf/common/src/pvf.rs +++ b/polkadot/node/core/pvf/common/src/pvf.rs @@ -85,9 +85,9 @@ impl PvfPrepData { /// Creates a structure for tests. 
#[cfg(feature = "test-utils")] pub fn from_discriminator_and_timeout(num: u32, timeout: Duration) -> Self { - let descriminator_buf = num.to_le_bytes().to_vec(); + let discriminator_buf = num.to_le_bytes().to_vec(); Self::from_code( - descriminator_buf, + discriminator_buf, ExecutorParams::default(), timeout, PrepareJobKind::Compilation, diff --git a/polkadot/node/core/pvf/src/host.rs b/polkadot/node/core/pvf/src/host.rs index 8ec46f4b08f..59d5a7e20a8 100644 --- a/polkadot/node/core/pvf/src/host.rs +++ b/polkadot/node/core/pvf/src/host.rs @@ -917,7 +917,7 @@ async fn sweeper_task(mut sweeper_rx: mpsc::Receiver) { gum::trace!( target: LOG_TARGET, ?result, - "Sweeped the artifact file {}", + "Swept the artifact file {}", condemned.display(), ); }, diff --git a/polkadot/node/core/pvf/src/priority.rs b/polkadot/node/core/pvf/src/priority.rs index d4bd49eaee8..d1ef9c604b1 100644 --- a/polkadot/node/core/pvf/src/priority.rs +++ b/polkadot/node/core/pvf/src/priority.rs @@ -29,7 +29,7 @@ pub enum Priority { } impl Priority { - /// Returns `true` if `self` is `Crticial` + /// Returns `true` if `self` is `Critical` pub fn is_critical(self) -> bool { self == Priority::Critical } diff --git a/polkadot/node/core/pvf/src/worker_interface.rs b/polkadot/node/core/pvf/src/worker_interface.rs index ad9f0294c09..93fffc80662 100644 --- a/polkadot/node/core/pvf/src/worker_interface.rs +++ b/polkadot/node/core/pvf/src/worker_interface.rs @@ -130,11 +130,11 @@ where fn make_tmppath(prefix: &str, dir: &Path) -> PathBuf { use rand::distributions::Alphanumeric; - const DESCRIMINATOR_LEN: usize = 10; + const DISCRIMINATOR_LEN: usize = 10; - let mut buf = Vec::with_capacity(prefix.len() + DESCRIMINATOR_LEN); + let mut buf = Vec::with_capacity(prefix.len() + DISCRIMINATOR_LEN); buf.extend(prefix.as_bytes()); - buf.extend(rand::thread_rng().sample_iter(&Alphanumeric).take(DESCRIMINATOR_LEN)); + buf.extend(rand::thread_rng().sample_iter(&Alphanumeric).take(DISCRIMINATOR_LEN)); let s = std::str::from_utf8(&buf) .expect("the string is collected from a valid utf-8 sequence; qed"); diff --git a/polkadot/node/core/pvf/tests/it/main.rs b/polkadot/node/core/pvf/tests/it/main.rs index cdfbcd8e578..16ef23c69ca 100644 --- a/polkadot/node/core/pvf/tests/it/main.rs +++ b/polkadot/node/core/pvf/tests/it/main.rs @@ -543,9 +543,9 @@ async fn all_security_features_work() { // artifacts cache existing. 
#[cfg(all(feature = "ci-only-tests", target_os = "linux"))] #[tokio::test] -async fn nonexistant_cache_dir() { +async fn nonexistent_cache_dir() { let host = TestHost::new_with_config(|cfg| { - cfg.cache_path = cfg.cache_path.join("nonexistant_cache_dir"); + cfg.cache_path = cfg.cache_path.join("nonexistent_cache_dir"); }) .await; diff --git a/polkadot/node/core/runtime-api/src/tests.rs b/polkadot/node/core/runtime-api/src/tests.rs index fefd2d3f862..b51682aa0f4 100644 --- a/polkadot/node/core/runtime-api/src/tests.rs +++ b/polkadot/node/core/runtime-api/src/tests.rs @@ -299,12 +299,12 @@ impl RuntimeApiSubsystemClient for MockSubsystemClient { #[test] fn requests_authorities() { let (ctx, mut ctx_handle) = make_subsystem_context(TaskExecutor::new()); - let substem_client = Arc::new(MockSubsystemClient::default()); + let subsystem_client = Arc::new(MockSubsystemClient::default()); let relay_parent = [1; 32].into(); let spawner = sp_core::testing::TaskExecutor::new(); let subsystem = - RuntimeApiSubsystem::new(substem_client.clone(), Metrics(None), SpawnGlue(spawner)); + RuntimeApiSubsystem::new(subsystem_client.clone(), Metrics(None), SpawnGlue(spawner)); let subsystem_task = run(ctx, subsystem).map(|x| x.unwrap()); let test_task = async move { let (tx, rx) = oneshot::channel(); @@ -315,7 +315,7 @@ fn requests_authorities() { }) .await; - assert_eq!(rx.await.unwrap().unwrap(), substem_client.authorities); + assert_eq!(rx.await.unwrap().unwrap(), subsystem_client.authorities); ctx_handle.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; }; @@ -1042,7 +1042,7 @@ fn requests_submit_pvf_check_statement() { let _ = rx.await.unwrap().unwrap(); assert_eq!( - &*subsystem_client.submitted_pvf_check_statement.lock().expect("poisened mutex"), + &*subsystem_client.submitted_pvf_check_statement.lock().expect("poisoned mutex"), &[(stmt.clone(), sig.clone()), (stmt.clone(), sig.clone())] ); diff --git a/polkadot/node/jaeger/src/spans.rs b/polkadot/node/jaeger/src/spans.rs index 4038d41344f..4816fccf3b9 100644 --- a/polkadot/node/jaeger/src/spans.rs +++ b/polkadot/node/jaeger/src/spans.rs @@ -70,7 +70,7 @@ //! let root_span = //! jaeger::Span::new(relay_parent, "root_of_aaall_spans"); //! -//! // the prefered way of adding additional delayed information: +//! // the preferred way of adding additional delayed information: //! let span = root_span.child("inner"); //! //! // ... more operations ... diff --git a/polkadot/node/malus/README.md b/polkadot/node/malus/README.md index e0c7893a753..25453a1980e 100644 --- a/polkadot/node/malus/README.md +++ b/polkadot/node/malus/README.md @@ -18,7 +18,7 @@ defined in the [(DSL[(**D**omain **S**pecific **L**anguage)]) doc](https://parit ## Usage -> Assumes you already gained permissiones, ping in element `@javier:matrix.parity.io` to get access. +> Assumes you have already gained permissions (ping `@javier:matrix.parity.io` in Element to get access) > and you have cloned the [zombienet][zombienet] repo. To launch a test case in the development cluster use (e.g. for the ./node/malus/integrationtests/0001-dispute-valid-block.toml): diff --git a/polkadot/node/network/approval-distribution/src/lib.rs b/polkadot/node/network/approval-distribution/src/lib.rs index f4e40270160..d360a18423e 100644 --- a/polkadot/node/network/approval-distribution/src/lib.rs +++ b/polkadot/node/network/approval-distribution/src/lib.rs @@ -230,7 +230,7 @@ impl ApprovalEntry { Ok(()) } - // Get the assignment certiticate and claimed candidates.
+ // Get the assignment certificate and claimed candidates. pub fn assignment(&self) -> (IndirectAssignmentCertV2, CandidateBitfield) { (self.assignment.clone(), self.assignment_claimed_candidates.clone()) } @@ -404,7 +404,7 @@ impl Knowledge { }, }; - // In case of succesful insertion of multiple candidate assignments create additional + // In case of successful insertion of multiple candidate assignments create additional // entries for each assigned candidate. This fakes knowledge of individual assignments, but // we need to share the same `MessageSubject` with the followup approval candidate index. if kind == MessageKind::Assignment && success && message.1.count_ones() > 1 { @@ -1897,10 +1897,10 @@ impl State { _ => break, }; - // Any peer which is in the `known_by` see and we know its peer_id authorithy id + // Any peer which is in the `known_by` set and we know its peer_id authority id // mapping has already been sent all messages it's meant to get for that block and // all in-scope prior blocks. In case, we just learnt about its peer_id - // authorithy-id mapping we have to retry sending the messages that should be sent + // authority-id mapping we have to retry sending the messages that should be sent // to it for all un-finalized blocks. if entry.known_by.contains_key(&peer_id) && !retry_known_blocks { break @@ -2199,7 +2199,7 @@ impl State { sanitized_assignments } - // Filter out obviously invalid candidate indicies. + // Filter out obviously invalid candidate indices. async fn sanitize_v1_approvals( &mut self, peer_id: PeerId, @@ -2226,7 +2226,7 @@ impl State { sanitized_approvals } - // Filter out obviously invalid candidate indicies. + // Filter out obviously invalid candidate indices. async fn sanitize_v2_approvals( &mut self, peer_id: PeerId, @@ -2260,7 +2260,7 @@ impl State { // The modifier accepts as inputs the current required-routing state, whether // the message is locally originating, and the validator index of the message issuer. // -// Then, if the topology is known, this progates messages to all peers in the required +// Then, if the topology is known, this propagates messages to all peers in the required // routing set which are aware of the block. Peers which are unaware of the block // will have the message sent when it enters their view in `unify_with_peer`. // @@ -2440,7 +2440,7 @@ impl ApprovalDistribution { gum::trace!(target: LOG_TARGET, "active leaves signal (ignored)"); // the relay chain blocks relevant to the approval subsystems // are those that are available, but not finalized yet - // actived and deactivated heads hence are irrelevant to this subsystem, other than + // activated and deactivated heads hence are irrelevant to this subsystem, other than // for tracing purposes.
if let Some(activated) = update.activated { let head = activated.hash; diff --git a/polkadot/node/network/approval-distribution/src/metrics.rs b/polkadot/node/network/approval-distribution/src/metrics.rs index 0642b1b2e0c..60c7f2f6d3b 100644 --- a/polkadot/node/network/approval-distribution/src/metrics.rs +++ b/polkadot/node/network/approval-distribution/src/metrics.rs @@ -299,7 +299,7 @@ impl MetricsTrait for Metrics { prometheus::CounterVec::new( prometheus::Opts::new( "polkadot_parachain_assignments_received_result", - "Result of a processed assignement", + "Result of a processed assignment", ), &["status"] )?, diff --git a/polkadot/node/network/approval-distribution/src/tests.rs b/polkadot/node/network/approval-distribution/src/tests.rs index 6c88dd53ad3..3159fe2ae5e 100644 --- a/polkadot/node/network/approval-distribution/src/tests.rs +++ b/polkadot/node/network/approval-distribution/src/tests.rs @@ -394,7 +394,7 @@ fn try_import_the_same_assignment() { setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::V1).await; setup_peer_with_view(overseer, &peer_c, view![hash], ValidationVersion::V1).await; - // Set up a gossip topology, where a, b, c and d are topology neighboors to the node under + // Set up a gossip topology, where a, b, c and d are topology neighbors to the node under // testing. let peers_with_optional_peer_id = peers .iter() @@ -491,7 +491,7 @@ fn try_import_the_same_assignment_v2() { setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::V3).await; setup_peer_with_view(overseer, &peer_c, view![hash], ValidationVersion::V3).await; - // Set up a gossip topology, where a, b, c and d are topology neighboors to the node under + // Set up a gossip topology, where a, b, c and d are topology neighbors to the node under // testing. let peers_with_optional_peer_id = peers .iter() @@ -744,7 +744,7 @@ fn peer_sending_us_the_same_we_just_sent_them_is_ok() { .iter() .map(|(peer_id, authority)| (Some(*peer_id), authority.clone())) .collect_vec(); - // Setup a topology where peer_a is neigboor to current node. + // Setup a topology where peer_a is neighbor to current node. setup_gossip_topology( overseer, make_gossip_topology(1, &peers_with_optional_peer_id, &[0], &[2], 1), @@ -850,7 +850,7 @@ fn import_approval_happy_path_v1_v2_peers() { .iter() .map(|(peer_id, authority)| (Some(*peer_id), authority.clone())) .collect_vec(); - // Set up a gossip topology, where a, b, and c are topology neighboors to the node. + // Set up a gossip topology, where a, b, and c are topology neighbors to the node. setup_gossip_topology( overseer, make_gossip_topology(1, &peers_with_optional_peer_id, &[0, 1], &[2, 4], 3), @@ -972,7 +972,7 @@ fn import_approval_happy_path_v2() { .iter() .map(|(peer_id, authority)| (Some(*peer_id), authority.clone())) .collect_vec(); - // Set up a gossip topology, where a, b, and c are topology neighboors to the node. + // Set up a gossip topology, where a, b, and c are topology neighbors to the node. setup_gossip_topology( overseer, make_gossip_topology(1, &peers_with_optional_peer_id, &[0, 1], &[2, 4], 3), @@ -1083,7 +1083,7 @@ fn multiple_assignments_covered_with_one_approval_vote() { .iter() .map(|(peer_id, authority)| (Some(*peer_id), authority.clone())) .collect_vec(); - // Set up a gossip topology, where a, b, and c, d are topology neighboors to the node. + // Set up a gossip topology, where a, b, and c, d are topology neighbors to the node. 
setup_gossip_topology( overseer, make_gossip_topology(1, &peers_with_optional_peer_id, &[0, 1], &[2, 4], 3), @@ -1273,7 +1273,7 @@ fn unify_with_peer_multiple_assignments_covered_with_one_approval_vote() { .iter() .map(|(peer_id, authority)| (Some(*peer_id), authority.clone())) .collect_vec(); - // Set up a gossip topology, where a, b, and c, d are topology neighboors to the node. + // Set up a gossip topology, where a, b, and c, d are topology neighbors to the node. setup_gossip_topology( overseer, make_gossip_topology(1, &peers_with_optional_peer_id, &[0, 1], &[2, 4], 3), @@ -1631,7 +1631,7 @@ fn update_peer_view() { .iter() .map(|(peer_id, authority)| (Some(*peer_id), authority.clone())) .collect_vec(); - // Setup a topology where peer_a is neigboor to current node. + // Setup a topology where peer_a is neighbor to current node. setup_gossip_topology( overseer, make_gossip_topology(1, &peers_with_optional_peer_id, &[0], &[2], 1), @@ -1758,7 +1758,7 @@ fn update_peer_view() { assert!(state.blocks.get(&hash_c).unwrap().known_by.get(peer).is_none()); } -// Tests that updating the known peer_id for a given authorithy updates the topology +// Tests that updating the known peer_id for a given authority updates the topology // and sends the required messages #[test] fn update_peer_authority_id() { @@ -1770,9 +1770,9 @@ fn update_peer_authority_id() { let neighbour_x_index = 0; let neighbour_y_index = 2; let local_index = 1; - // X neighbour, we simulate that PeerId is not known in the beginining. + // X neighbour, we simulate that PeerId is not known in the beginning. let neighbour_x = peers.get(neighbour_x_index).unwrap().0; - // Y neighbour, we simulate that PeerId is not known in the beginining. + // Y neighbour, we simulate that PeerId is not known in the beginning. let neighbour_y = peers.get(neighbour_y_index).unwrap().0; let _state = test_harness(State::default(), |mut virtual_overseer| async move { @@ -1814,7 +1814,7 @@ fn update_peer_authority_id() { }) .collect_vec(); - // Setup a topology where peer_a is neigboor to current node. + // Setup a topology where peer_a is neighbor to current node. setup_gossip_topology( overseer, make_gossip_topology( @@ -2053,7 +2053,7 @@ fn sends_assignments_even_when_state_is_approved() { .iter() .map(|(peer_id, authority)| (Some(*peer_id), authority.clone())) .collect_vec(); - // Setup a topology where peer_a is neigboor to current node. + // Setup a topology where peer_a is neighbor to current node. setup_gossip_topology( overseer, make_gossip_topology(1, &peers_with_optional_peer_id, &[0], &[2], 1), @@ -2125,7 +2125,7 @@ fn sends_assignments_even_when_state_is_approved() { } /// Same as `sends_assignments_even_when_state_is_approved_v2` but with `VRFModuloCompact` -/// assignemnts. +/// assignments. #[test] fn sends_assignments_even_when_state_is_approved_v2() { let peers = make_peers_and_authority_ids(8); @@ -2153,7 +2153,7 @@ fn sends_assignments_even_when_state_is_approved_v2() { .iter() .map(|(peer_id, authority)| (Some(*peer_id), authority.clone())) .collect_vec(); - // Setup a topology where peer_a is neigboor to current node. + // Setup a topology where peer_a is neighbor to current node. 
setup_gossip_topology( overseer, make_gossip_topology(1, &peers_with_optional_peer_id, &[0], &[2], 1), @@ -3509,7 +3509,7 @@ fn import_versioned_approval() { setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::V1).await; setup_peer_with_view(overseer, &peer_c, view![hash], ValidationVersion::V2).await; - // Set up a gossip topology, where a, b, c and d are topology neighboors to the node under + // Set up a gossip topology, where a, b, c and d are topology neighbors to the node under // testing. let peers_with_optional_peer_id = peers .iter() diff --git a/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs b/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs index 191ee2acd97..f478defcaa9 100644 --- a/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs +++ b/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs @@ -348,7 +348,7 @@ impl RunningTask { async fn do_request( &mut self, validator: &AuthorityDiscoveryId, - nerwork_error_freq: &mut gum::Freq, + network_error_freq: &mut gum::Freq, canceled_freq: &mut gum::Freq, ) -> std::result::Result { gum::trace!( @@ -395,7 +395,7 @@ impl RunningTask { }, Err(RequestError::NetworkError(err)) => { gum::warn_if_frequent!( - freq: nerwork_error_freq, + freq: network_error_freq, max_rate: gum::Times::PerHour(100), target: LOG_TARGET, origin = ?validator, diff --git a/polkadot/node/network/availability-recovery/src/lib.rs b/polkadot/node/network/availability-recovery/src/lib.rs index fb8064878f4..94b9d9546cd 100644 --- a/polkadot/node/network/availability-recovery/src/lib.rs +++ b/polkadot/node/network/availability-recovery/src/lib.rs @@ -408,7 +408,7 @@ async fn handle_recover( ) -> error::Result<()> { let candidate_hash = receipt.hash(); - let span = jaeger::Span::new(candidate_hash, "availbility-recovery") + let span = jaeger::Span::new(candidate_hash, "availability-recovery") .with_stage(jaeger::Stage::AvailabilityRecovery); if let Some(result) = diff --git a/polkadot/node/network/availability-recovery/src/metrics.rs b/polkadot/node/network/availability-recovery/src/metrics.rs index d82a8f9ae5f..9f4cddc57e4 100644 --- a/polkadot/node/network/availability-recovery/src/metrics.rs +++ b/polkadot/node/network/availability-recovery/src/metrics.rs @@ -31,7 +31,7 @@ struct MetricsInner { chunk_requests_issued: Counter, /// Total number of bytes recovered /// - /// Gets incremented on each succesful recovery + /// Gets incremented on each successful recovery recovered_bytes_total: Counter, /// A counter for finished chunk requests. 
/// @@ -232,7 +232,7 @@ impl metrics::Metrics for Metrics { )?, full_recoveries_started: prometheus::register( Counter::new( - "polkadot_parachain_availability_recovery_recovieries_started", + "polkadot_parachain_availability_recovery_recoveries_started", "Total number of started recoveries.", )?, registry, diff --git a/polkadot/node/network/bitfield-distribution/src/metrics.rs b/polkadot/node/network/bitfield-distribution/src/metrics.rs index 71d8a01300f..bd956bcbe4a 100644 --- a/polkadot/node/network/bitfield-distribution/src/metrics.rs +++ b/polkadot/node/network/bitfield-distribution/src/metrics.rs @@ -69,14 +69,14 @@ impl MetricsTrait for Metrics { let metrics = MetricsInner { sent_own_availability_bitfields: prometheus::register( prometheus::Counter::new( - "polkadot_parachain_sent_own_availabilty_bitfields_total", + "polkadot_parachain_sent_own_availability_bitfields_total", "Number of own availability bitfields sent to other peers.", )?, registry, )?, received_availability_bitfields: prometheus::register( prometheus::Counter::new( - "polkadot_parachain_received_availabilty_bitfields_total", + "polkadot_parachain_received_availability_bitfields_total", "Number of valid availability bitfields received from other peers.", )?, registry, diff --git a/polkadot/node/network/bitfield-distribution/src/tests.rs b/polkadot/node/network/bitfield-distribution/src/tests.rs index ba2434ea47d..188b51ebccc 100644 --- a/polkadot/node/network/bitfield-distribution/src/tests.rs +++ b/polkadot/node/network/bitfield-distribution/src/tests.rs @@ -150,7 +150,7 @@ fn receive_invalid_signature() { let signing_context = SigningContext { session_index: 1, parent_hash: hash_a }; - // another validator not part of the validatorset + // another validator not part of the validator set let keystore: KeystorePtr = Arc::new(MemoryKeystore::new()); let malicious = Keystore::sr25519_generate_new(&*keystore, ValidatorId::ID, None) .expect("Malicious key created"); diff --git a/polkadot/node/network/dispute-distribution/src/sender/mod.rs b/polkadot/node/network/dispute-distribution/src/sender/mod.rs index f4acc72318a..8187f20146c 100644 --- a/polkadot/node/network/dispute-distribution/src/sender/mod.rs +++ b/polkadot/node/network/dispute-distribution/src/sender/mod.rs @@ -76,7 +76,7 @@ pub struct DisputeSender { /// Value is the hash that was used for the query. active_sessions: HashMap, - /// All ongoing dispute sendings this subsystem is aware of. + /// All ongoing dispute sending that this subsystem is aware of. /// /// Using an `IndexMap` so items can be iterated in the order of insertion. disputes: IndexMap>, @@ -105,7 +105,7 @@ struct WaitForActiveDisputesState { #[overseer::contextbounds(DisputeDistribution, prefix = self::overseer)] impl DisputeSender { - /// Create a new `DisputeSender` which can be used to start dispute sendings. + /// Create a new `DisputeSender` which can be used to start dispute sending. pub fn new(tx: NestingSender, metrics: Metrics) -> Self { Self { active_heads: Vec::new(), @@ -362,7 +362,7 @@ async fn get_active_session_indices( runtime: &mut RuntimeInfo, active_heads: &Vec, ) -> Result> { - let mut indeces = HashMap::new(); + let mut indices = HashMap::new(); // Iterate all heads we track as active and fetch the child' session indices.
for head in active_heads { let session_index = runtime.get_session_index_for_child(ctx.sender(), *head).await?; @@ -372,9 +372,9 @@ async fn get_active_session_indices( { gum::debug!(target: LOG_TARGET, ?err, ?session_index, "Can't cache SessionInfo"); } - indeces.insert(session_index, *head); + indices.insert(session_index, *head); } - Ok(indeces) + Ok(indices) } /// Retrieve Set of active disputes from the dispute coordinator. diff --git a/polkadot/node/network/dispute-distribution/src/tests/mock.rs b/polkadot/node/network/dispute-distribution/src/tests/mock.rs index e6a49f14c09..ccc050233e8 100644 --- a/polkadot/node/network/dispute-distribution/src/tests/mock.rs +++ b/polkadot/node/network/dispute-distribution/src/tests/mock.rs @@ -163,7 +163,7 @@ pub fn make_dispute_message( let invalid_vote = make_explicit_signed(MOCK_VALIDATORS[invalid_validator.0 as usize], candidate_hash, false); gum::trace!( - "Passed time for invald vote: {:#?}", + "Passed time for invalid vote: {:#?}", Instant::now().saturating_duration_since(before_request) ); DisputeMessage::from_signed_statements( diff --git a/polkadot/node/network/gossip-support/src/tests.rs b/polkadot/node/network/gossip-support/src/tests.rs index 6817c85f98d..cce78df38f3 100644 --- a/polkadot/node/network/gossip-support/src/tests.rs +++ b/polkadot/node/network/gossip-support/src/tests.rs @@ -122,7 +122,7 @@ impl MockAuthorityDiscovery { self.authorities.lock().clone() } - fn add_more_authorties( + fn add_more_authorities( &self, new_known: Vec, ) -> HashMap> { @@ -720,7 +720,7 @@ fn issues_update_authorities_after_session() { assert!(overseer.recv().timeout(TIMEOUT).await.is_none()); // 4. Connect more authorities except one - let newly_added = authority_discovery_mock.add_more_authorties(unknown_at_session); + let newly_added = authority_discovery_mock.add_more_authorities(unknown_at_session); let mut newly_added_iter = newly_added.iter(); let unconnected_at_last_retry = newly_added_iter .next() diff --git a/polkadot/node/network/protocol/src/grid_topology.rs b/polkadot/node/network/protocol/src/grid_topology.rs index 3c4372a27a2..a14d2461072 100644 --- a/polkadot/node/network/protocol/src/grid_topology.rs +++ b/polkadot/node/network/protocol/src/grid_topology.rs @@ -89,7 +89,7 @@ impl SessionGridTopology { SessionGridTopology { shuffled_indices, canonical_shuffling, peer_ids } } - /// Updates the known peer ids for the passed authorithies ids. + /// Updates the known peer ids for the passed authority ids. pub fn update_authority_ids( &mut self, peer_id: PeerId, @@ -313,7 +313,7 @@ impl SessionGridTopologyEntry { self.topology.is_validator(peer) } - /// Updates the known peer ids for the passed authorithies ids. + /// Updates the known peer ids for the passed authority ids. pub fn update_authority_ids( &mut self, peer_id: PeerId, @@ -345,7 +345,7 @@ impl SessionGridTopologies { self.inner.get(&session).and_then(|val| val.0.as_ref()) } - /// Updates the known peer ids for the passed authorithies ids. + /// Updates the known peer ids for the passed authority ids. pub fn update_authority_ids( &mut self, peer_id: PeerId, diff --git a/polkadot/node/network/protocol/src/lib.rs b/polkadot/node/network/protocol/src/lib.rs index 7a0ff9f4fa9..4dd94b5eac4 100644 --- a/polkadot/node/network/protocol/src/lib.rs +++ b/polkadot/node/network/protocol/src/lib.rs @@ -871,7 +871,7 @@ pub mod v2 { } /// v3 network protocol types.
-/// Purpose is for chaning ApprovalDistributionMessage to +/// Purpose is for changing ApprovalDistributionMessage to /// include more than one assignment and approval in a message. pub mod v3 { use parity_scale_codec::{Decode, Encode}; diff --git a/polkadot/node/network/protocol/src/peer_set.rs b/polkadot/node/network/protocol/src/peer_set.rs index cb329607ad6..d0ae5b4a1bf 100644 --- a/polkadot/node/network/protocol/src/peer_set.rs +++ b/polkadot/node/network/protocol/src/peer_set.rs @@ -234,7 +234,7 @@ pub enum ValidationVersion { /// The second version. V2 = 2, /// The third version where changes to ApprovalDistributionMessage had been made. - /// The changes are translatable to V2 format untill assignments v2 and approvals + /// The changes are translatable to V2 format until assignments v2 and approvals /// coalescing is enabled through a runtime upgrade. V3 = 3, } diff --git a/polkadot/node/network/protocol/src/request_response/incoming/mod.rs b/polkadot/node/network/protocol/src/request_response/incoming/mod.rs index 44554483867..1d7c4a63e0c 100644 --- a/polkadot/node/network/protocol/src/request_response/incoming/mod.rs +++ b/polkadot/node/network/protocol/src/request_response/incoming/mod.rs @@ -47,7 +47,7 @@ where Req: IsRequest + Decode + Encode, Req::Response: Encode, { - /// Create configuration for `NetworkConfiguration::request_response_porotocols` and a + /// Create configuration for `NetworkConfiguration::request_response_protocols` and a /// corresponding typed receiver. /// /// This Register that config with substrate networking and receive incoming requests via the diff --git a/polkadot/node/network/protocol/src/request_response/mod.rs b/polkadot/node/network/protocol/src/request_response/mod.rs index 2fb62f56d10..87217bf084f 100644 --- a/polkadot/node/network/protocol/src/request_response/mod.rs +++ b/polkadot/node/network/protocol/src/request_response/mod.rs @@ -287,7 +287,7 @@ impl Protocol { match self { // Hundreds of validators will start requesting their chunks once they see a candidate // awaiting availability on chain. Given that they will see that block at different - // times (due to network delays), 100 seems big enough to accomodate for "bursts", + // times (due to network delays), 100 seems big enough to accommodate "bursts", assuming we can service requests relatively quickly, which would need to be measured // as well. Protocol::ChunkFetchingV1 => 100, diff --git a/polkadot/node/network/protocol/src/request_response/v1.rs b/polkadot/node/network/protocol/src/request_response/v1.rs index ba29b32c4ce..60eecb69f73 100644 --- a/polkadot/node/network/protocol/src/request_response/v1.rs +++ b/polkadot/node/network/protocol/src/request_response/v1.rs @@ -183,7 +183,7 @@ impl IsRequest for AvailableDataFetchingRequest { pub struct StatementFetchingRequest { /// Data needed to locate and identify the needed statement. pub relay_parent: Hash, - /// Hash of candidate that was used create the `CommitedCandidateRecept`. + /// Hash of candidate that was used to create the `CommittedCandidateReceipt`.
pub candidate_hash: CandidateHash, } diff --git a/polkadot/node/network/statement-distribution/src/legacy_v1/responder.rs b/polkadot/node/network/statement-distribution/src/legacy_v1/responder.rs index 81e226c4ff8..8d1683759a0 100644 --- a/polkadot/node/network/statement-distribution/src/legacy_v1/responder.rs +++ b/polkadot/node/network/statement-distribution/src/legacy_v1/responder.rs @@ -64,7 +64,7 @@ pub async fn respond( // late, as each requester having the data will help distributing it. // 2. If we take too long, the requests timing out will not yet have had any data sent, thus // we wasted no bandwidth. - // 3. If the queue is full, requestes will get an immediate error instead of running in a + // 3. If the queue is full, requests will get an immediate error instead of running in a // timeout, thus requesters can immediately try another peer and be faster. // // From this perspective we would not want parallel response sending at all, but we don't diff --git a/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs b/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs index 2766ec9815a..08e9d69d8ee 100644 --- a/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs +++ b/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs @@ -1494,7 +1494,7 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing( Err(()) => {} ); - // And now the succeding request from peer_b: + // And now the succeeding request from peer_b: let (pending_response, response_rx) = oneshot::channel(); let inner_req = StatementFetchingRequest { relay_parent: metadata.relay_parent, diff --git a/polkadot/node/network/statement-distribution/src/v2/cluster.rs b/polkadot/node/network/statement-distribution/src/v2/cluster.rs index c09916e5620..87cdc389cb3 100644 --- a/polkadot/node/network/statement-distribution/src/v2/cluster.rs +++ b/polkadot/node/network/statement-distribution/src/v2/cluster.rs @@ -430,8 +430,8 @@ impl ClusterTracker { /// /// Normally we should not have pending statements to validators in our cluster, /// but if we do for all validators in our cluster, then we don't participate - /// in backing. Ocasional pending statements are expected if two authorities - /// can't detect each otehr or after restart, where it takes a while to discover + /// in backing. Occasional pending statements are expected if two authorities + /// can't detect each other or after restart, where it takes a while to discover /// the whole network. 
pub fn warn_if_too_many_pending_statements(&self, parent_hash: Hash) { diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs index 2c9cdba4ea8..d782e37f10b 100644 --- a/polkadot/node/network/statement-distribution/src/v2/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -112,7 +112,7 @@ const COST_EXCESSIVE_SECONDED: Rep = Rep::CostMinor("Sent Excessive `Seconded` S const COST_DISABLED_VALIDATOR: Rep = Rep::CostMinor("Sent a statement from a disabled validator"); const COST_UNEXPECTED_MANIFEST_MISSING_KNOWLEDGE: Rep = - Rep::CostMinor("Unexpected Manifest, missing knowlege for relay parent"); + Rep::CostMinor("Unexpected Manifest, missing knowledge for relay parent"); const COST_UNEXPECTED_MANIFEST_DISALLOWED: Rep = Rep::CostMinor("Unexpected Manifest, Peer Disallowed"); const COST_UNEXPECTED_MANIFEST_PEER_UNKNOWN: Rep = @@ -628,8 +628,8 @@ pub(crate) async fn handle_active_leaves_update( request_min_backing_votes(new_relay_parent, session_index, ctx.sender()).await?; let mut per_session_state = PerSessionState::new(session_info, &state.keystore, minimum_backing_votes); - if let Some(toplogy) = state.unused_topologies.remove(&session_index) { - per_session_state.supply_topology(&toplogy.topology, toplogy.local_index); + if let Some(topology) = state.unused_topologies.remove(&session_index) { + per_session_state.supply_topology(&topology.topology, topology.local_index); } state.per_session.insert(session_index, per_session_state); } diff --git a/polkadot/node/network/statement-distribution/src/v2/requests.rs b/polkadot/node/network/statement-distribution/src/v2/requests.rs index bbcb268415e..fe270c8a58e 100644 --- a/polkadot/node/network/statement-distribution/src/v2/requests.rs +++ b/polkadot/node/network/statement-distribution/src/v2/requests.rs @@ -320,7 +320,7 @@ impl RequestManager { // need for the current node to limit itself to the same amount the // requests, because the requests are going to different nodes anyways. // While looking at https://github.com/paritytech/polkadot-sdk/issues/3314, - // found out that this requests take around 100ms to fullfill, so it + // found out that these requests take around 100ms to fulfill, so it // would make sense to try to request things as early as we can, given // we would need to request it for each candidate, around 25 right now // on kusama. diff --git a/polkadot/node/overseer/src/lib.rs b/polkadot/node/overseer/src/lib.rs index e16a3fd27ab..167b32a15bc 100644 --- a/polkadot/node/overseer/src/lib.rs +++ b/polkadot/node/overseer/src/lib.rs @@ -871,7 +871,7 @@ where gum::trace!( target: LOG_TARGET, relay_parent = ?hash, - "Leaf got activated, notifying exterinal listeners" + "Leaf got activated, notifying external listeners" ); for listener in listeners { // it's fine if the listener is no longer interested diff --git a/polkadot/node/primitives/src/approval.rs b/polkadot/node/primitives/src/approval.rs index f2a79e025af..b73cb4c717d 100644 --- a/polkadot/node/primitives/src/approval.rs +++ b/polkadot/node/primitives/src/approval.rs @@ -382,7 +382,7 @@ pub mod v2 { /// The core index chosen in this cert. core_index: CoreIndex, }, - /// Deprectated assignment. Soon to be removed. + /// Deprecated assignment. Soon to be removed. /// An assignment story based on the VRF that authorized the relay-chain block where the /// candidate was included combined with a sample number.
/// diff --git a/polkadot/node/primitives/src/disputes/mod.rs b/polkadot/node/primitives/src/disputes/mod.rs index 768b95f6553..5814ecee44f 100644 --- a/polkadot/node/primitives/src/disputes/mod.rs +++ b/polkadot/node/primitives/src/disputes/mod.rs @@ -84,7 +84,7 @@ impl CandidateVotes { #[derive(Debug, Clone)] /// Valid candidate votes. /// -/// Prefere backing votes over other votes. +/// Prefer backing votes over other votes. pub struct ValidCandidateVotes { votes: BTreeMap, } @@ -133,7 +133,7 @@ impl ValidCandidateVotes { self.votes.retain(f) } - /// Get all the validator indeces we have votes for. + /// Get all the validator indices we have votes for. pub fn keys( &self, ) -> Bkeys<'_, ValidatorIndex, (ValidDisputeStatementKind, ValidatorSignature)> { diff --git a/polkadot/node/primitives/src/lib.rs b/polkadot/node/primitives/src/lib.rs index b6556e0be56..b102cf06c38 100644 --- a/polkadot/node/primitives/src/lib.rs +++ b/polkadot/node/primitives/src/lib.rs @@ -74,7 +74,7 @@ pub const VALIDATION_CODE_BOMB_LIMIT: usize = (MAX_CODE_SIZE * 4u32) as usize; pub const POV_BOMB_LIMIT: usize = (MAX_POV_SIZE * 4u32) as usize; /// How many blocks after finalization an information about backed/included candidate should be -/// pre-loaded (when scraoing onchain votes) and kept locally (when pruning). +/// pre-loaded (when scraping onchain votes) and kept locally (when pruning). /// /// We don't want to remove scraped candidates on finalization because we want to /// be sure that disputes will conclude on abandoned forks. diff --git a/polkadot/node/service/src/overseer.rs b/polkadot/node/service/src/overseer.rs index a8167370464..9575b2458a2 100644 --- a/polkadot/node/service/src/overseer.rs +++ b/polkadot/node/service/src/overseer.rs @@ -77,7 +77,7 @@ pub struct OverseerGenArgs<'a, Spawner, RuntimeClient> where Spawner: 'static + SpawnNamed + Clone + Unpin, { - /// Runtime client generic, providing the `ProvieRuntimeApi` trait besides others. + /// Runtime client generic, providing the `ProvideRuntimeApi` trait besides others. pub runtime_client: Arc, /// Underlying network service implementation. pub network_service: Arc>, diff --git a/polkadot/node/service/src/parachains_db/upgrade.rs b/polkadot/node/service/src/parachains_db/upgrade.rs index 2eceb391b15..4d737085960 100644 --- a/polkadot/node/service/src/parachains_db/upgrade.rs +++ b/polkadot/node/service/src/parachains_db/upgrade.rs @@ -115,7 +115,7 @@ pub(crate) fn try_upgrade_db_to_next_version( Some(CURRENT_VERSION) => CURRENT_VERSION, // This is an arbitrary future version, we don't handle it. Some(v) => return Err(Error::FutureVersion { current: CURRENT_VERSION, got: v }), - // No version file. For `RocksDB` we dont need to do anything. + // No version file. For `RocksDB` we don't need to do anything. None if db_kind == DatabaseKind::RocksDB => CURRENT_VERSION, // No version file. `ParityDB` did not previously have a version defined. // We handle this as a `0 -> 1` migration. @@ -183,7 +183,7 @@ fn migrate_from_version_1_to_2(path: &Path, db_kind: DatabaseKind) -> Result { // The total lag accounting for disputes. 
let lag_disputes = initial_leaf_number.saturating_sub(subchain_number); diff --git a/polkadot/node/subsystem-bench/grafana/availability-read.json b/polkadot/node/subsystem-bench/grafana/availability-read.json index 31c4ad3c795..96a83d2d70f 100644 --- a/polkadot/node/subsystem-bench/grafana/availability-read.json +++ b/polkadot/node/subsystem-bench/grafana/availability-read.json @@ -119,7 +119,7 @@ "editorMode": "code", "expr": "subsystem_benchmark_n_validators{}", "instant": false, - "legendFormat": "n_vaidators", + "legendFormat": "n_validators", "range": true, "refId": "A" }, @@ -1046,7 +1046,7 @@ "refId": "A" } ], - "title": "Availability subystem metrics", + "title": "Availability subsystem metrics", "type": "row" }, { @@ -1397,7 +1397,7 @@ "refId": "B" } ], - "title": "Recovery throughtput", + "title": "Recovery throughput", "transformations": [], "type": "timeseries" }, diff --git a/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs b/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs index c1b31a509f6..3b08d0ed861 100644 --- a/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs @@ -74,8 +74,8 @@ pub struct PeerMessagesGenerator { pub validator_index: ValidatorIndex, /// An array of pre-generated random samplings, that is used to determine, which nodes would /// send a given assignment, to the node under test because of the random samplings. - /// As an optimization we generate this sampling at the begining of the test and just pick - /// one randomly, because always taking the samples would be too expensive for benchamrk. + /// As an optimization we generate this sampling at the beginning of the test and just pick + /// one randomly, because always taking the samples would be too expensive for benchmark. pub random_samplings: Vec<Vec<ValidatorIndex>>, /// Channel for sending the generated messages to the aggregator pub tx_messages: futures::channel::mpsc::UnboundedSender<(Hash, Vec<MessagesBundle>)>, @@ -234,7 +234,7 @@ impl PeerMessagesGenerator { let all_messages = all_messages .into_iter() .flat_map(|(_, mut messages)| { - // Shuffle the messages inside the same tick, so that we don't priorites messages + // Shuffle the messages inside the same tick, so that we don't prioritize messages // for older nodes. we try to simulate the same behaviour as in real world. messages.shuffle(&mut rand_chacha); messages @@ -560,12 +560,12 @@ struct TestSignInfo { candidate_index: CandidateIndex, /// The validator sending the assignments validator_index: ValidatorIndex, - /// The assignments convering this candidate + /// The assignments covering this candidate assignment: TestMessageInfo, } impl TestSignInfo { - /// Helper function to create a signture for all candidates in `to_sign` parameter. + /// Helper function to create a signature for all candidates in `to_sign` parameter. /// Returns a TestMessage fn sign_candidates( to_sign: &mut Vec<TestSignInfo>, diff --git a/polkadot/node/subsystem-bench/src/lib/approval/mod.rs b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs index 450faf06123..6ab5b86baed 100644 --- a/polkadot/node/subsystem-bench/src/lib/approval/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs @@ -115,7 +115,7 @@ pub struct ApprovalsOptions { #[clap(short, long, default_value_t = 1.0)] /// Max candidate to be signed in a single approval. pub coalesce_std_dev: f32, - /// The maximum tranche diff between approvals coalesced toghther.
+ /// The maximum tranche diff between approvals coalesced together. pub coalesce_tranche_diff: u32, #[clap(short, long, default_value_t = false)] /// Enable assignments v2. @@ -170,7 +170,7 @@ struct BlockTestData { total_candidates_before: u64, /// The votes we sent. /// votes[validator_index][candidate_index] tells if validator sent vote for candidate. - /// We use this to mark the test as succesfull if GetApprovalSignatures returns all the votes + /// We use this to mark the test as successful if GetApprovalSignatures returns all the votes /// from here. votes: Arc>>, } @@ -237,7 +237,7 @@ struct GeneratedState { } /// Approval test state used by all mock subsystems to be able to answer messages emitted -/// by the approval-voting and approval-distribution-subystems. +/// by the approval-voting and approval-distribution-subsystems. /// /// This gets cloned across all mock subsystems, so if there is any information that gets /// updated between subsystems, they would have to be wrapped in Arc's. @@ -498,7 +498,7 @@ struct PeerMessageProducer { impl PeerMessageProducer { /// Generates messages by spawning a blocking task in the background which begins creating - /// the assignments/approvals and peer view changes at the begining of each block. + /// the assignments/approvals and peer view changes at the beginning of each block. fn produce_messages( mut self, env: &TestEnvironment, @@ -740,7 +740,7 @@ impl PeerMessageProducer { } } - // Initializes the candidates test data. This is used for bookeeping if more assignments and + // Initializes the candidates test data. This is used for bookkeeping if more assignments and // approvals would be needed. fn initialize_candidates_test_data( &self, @@ -767,7 +767,7 @@ impl PeerMessageProducer { } /// Helper function to build an overseer with the real implementation for `ApprovalDistribution` and -/// `ApprovalVoting` subystems and mock subsytems for all others. +/// `ApprovalVoting` subsystems and mock subsystems for all others. fn build_overseer( state: &ApprovalTestState, network: &NetworkEmulatorHandle, @@ -936,7 +936,7 @@ pub async fn bench_approvals_run( for block_num in 0..env.config().num_blocks { let mut current_slot = tick_to_slot_number(SLOT_DURATION_MILLIS, system_clock.tick_now()); - // Wait untill the time arrieves at the first slot under test. + // Wait until the time arrives at the first slot under test. while current_slot < state.generated_state.initial_slot { sleep(Duration::from_millis(5)).await; current_slot = tick_to_slot_number(SLOT_DURATION_MILLIS, system_clock.tick_now()); @@ -961,7 +961,7 @@ pub async fn bench_approvals_run( } // Wait for all blocks to be approved before exiting. - // This is an invariant of the benchmark, if this does not happen something went teribbly wrong. + // This is an invariant of the benchmark, if this does not happen something went terribly wrong. while state.last_approved_block.load(std::sync::atomic::Ordering::SeqCst) < env.config().num_blocks as u32 { diff --git a/polkadot/node/subsystem-bench/src/lib/approval/test_message.rs b/polkadot/node/subsystem-bench/src/lib/approval/test_message.rs index 63e383509be..f55ed99205e 100644 --- a/polkadot/node/subsystem-bench/src/lib/approval/test_message.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/test_message.rs @@ -31,7 +31,7 @@ pub struct TestMessageInfo { pub msg: protocol_v3::ApprovalDistributionMessage, /// The list of peers that would sends this message in a real topology. 
/// It includes both the peers that would send the message because of the topology - /// or because of randomly chosing so. + /// or because of randomly choosing so. pub sent_by: Vec<ValidatorIndex>, /// The tranche at which this message should be sent. pub tranche: u32, @@ -90,7 +90,7 @@ impl MessagesBundle { /// Tells if the bundle is needed for sending. /// We either send it because we need more assignments and approvals to approve the candidates - /// or because we configured the test to send messages untill a given tranche. + /// or because we configured the test to send messages until a given tranche. pub fn should_send( &self, candidates_test_data: &HashMap<(Hash, CandidateIndex), CandidateTestData>, @@ -174,24 +174,24 @@ impl TestMessageInfo { } } - /// Returns a list of candidates indicies in this message + /// Returns a list of candidates indices in this message pub fn candidate_indices(&self) -> HashSet<usize> { - let mut unique_candidate_indicies = HashSet::new(); + let mut unique_candidate_indices = HashSet::new(); match &self.msg { protocol_v3::ApprovalDistributionMessage::Assignments(assignments) => for (_assignment, candidate_indices) in assignments { for candidate_index in candidate_indices.iter_ones() { - unique_candidate_indicies.insert(candidate_index); + unique_candidate_indices.insert(candidate_index); } }, protocol_v3::ApprovalDistributionMessage::Approvals(approvals) => for approval in approvals { for candidate_index in approval.candidate_indices.iter_ones() { - unique_candidate_indicies.insert(candidate_index); + unique_candidate_indices.insert(candidate_index); } }, } - unique_candidate_indicies + unique_candidate_indices } /// Marks this message as no-shows if the number of configured no-shows is above the registered diff --git a/polkadot/node/subsystem-bench/src/lib/availability/mod.rs b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs index 765afdd5912..fe986669061 100644 --- a/polkadot/node/subsystem-bench/src/lib/availability/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs @@ -69,7 +69,7 @@ const LOG_TARGET: &str = "subsystem-bench::availability"; pub struct DataAvailabilityReadOptions { #[clap(short, long, default_value_t = false)] /// Turbo boost AD Read by fetching the full availability data from backers first. Saves CPU as - /// we don't need to re-construct from chunks. Tipically this is only faster if nodes have + /// we don't need to re-construct from chunks. Typically this is only faster if nodes have /// enough bandwidth. pub fetch_from_backers: bool, } @@ -404,7 +404,7 @@ pub async fn benchmark_availability_write( let network = env.network().clone(); let authorities = env.authorities().clone(); - // Spawn a task that will generate `n_validator` - 1 signed bitfiends and + // Spawn a task that will generate `n_validator` - 1 signed bitfields and // send them from the emulated peers to the subsystem. // TODO: Implement topology. let messages = state.signed_bitfields.get(&relay_block_hash).expect("pregenerated").clone(); @@ -425,7 +425,7 @@ pub async fn benchmark_availability_write( // Wait for all bitfields to be processed.
env.wait_until_metric( - "polkadot_parachain_received_availabilty_bitfields_total", + "polkadot_parachain_received_availability_bitfields_total", None, |value| value == (config.connected_count() * block_num) as f64, ) diff --git a/polkadot/node/subsystem-bench/src/lib/configuration.rs b/polkadot/node/subsystem-bench/src/lib/configuration.rs index 17c81c4fd35..5725a5137ec 100644 --- a/polkadot/node/subsystem-bench/src/lib/configuration.rs +++ b/polkadot/node/subsystem-bench/src/lib/configuration.rs @@ -122,10 +122,10 @@ pub struct TestConfiguration { /// Randomly sampled pov_sizes #[serde(skip)] pub pov_sizes: Vec<usize>, - /// The amount of bandiwdth remote validators have. + /// The amount of bandwidth remote validators have. #[serde(default = "default_bandwidth")] pub peer_bandwidth: usize, - /// The amount of bandiwdth our node has. + /// The amount of bandwidth our node has. #[serde(default = "default_bandwidth")] pub bandwidth: usize, /// Optional peer emulation latency (round trip time) wrt node under test @@ -205,7 +205,7 @@ impl TestConfiguration { let peer_id_to_authority = peer_ids .iter() .zip(validator_authority_id.iter()) - .map(|(peer_id, authorithy_id)| (*peer_id, authorithy_id.clone())) + .map(|(peer_id, authority_id)| (*peer_id, authority_id.clone())) .collect(); TestAuthorities { diff --git a/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs b/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs index 080644da92a..fba33523be8 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs @@ -39,7 +39,7 @@ pub struct AvailabilityStoreState { const LOG_TARGET: &str = "subsystem-bench::av-store-mock"; -/// Mockup helper. Contains Ccunks and full availability data of all parachain blocks +/// Mockup helper. Contains Chunks and full availability data of all parachain blocks /// used in a test. #[derive(Clone)] pub struct NetworkAvailabilityState { diff --git a/polkadot/node/subsystem-bench/src/lib/mock/chain_api.rs b/polkadot/node/subsystem-bench/src/lib/mock/chain_api.rs index bee15c3cefd..86b030fb6fd 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/chain_api.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/chain_api.rs @@ -89,7 +89,7 @@ impl MockChainApi { let hash = self .state .get_header_by_number(requested_number) - .expect("Unknow block number") + .expect("Unknown block number") .hash(); sender.send(Ok(Some(hash))).unwrap(); }, diff --git a/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs b/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs index d598f6447d3..ec66ad4e279 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs @@ -42,8 +42,8 @@ pub struct MockNetworkBridgeTx { network: NetworkEmulatorHandle, /// A channel to the network interface, to_network_interface: UnboundedSender<NetworkMessage>, - /// Test authorithies - test_authorithies: TestAuthorities, + /// Test authorities + test_authorities: TestAuthorities, } /// A mock of the network bridge tx subsystem.
@@ -58,9 +58,9 @@ impl MockNetworkBridgeTx { pub fn new( network: NetworkEmulatorHandle, to_network_interface: UnboundedSender<NetworkMessage>, - test_authorithies: TestAuthorities, + test_authorities: TestAuthorities, ) -> MockNetworkBridgeTx { - Self { network, to_network_interface, test_authorithies } + Self { network, to_network_interface, test_authorities } } } @@ -125,13 +125,13 @@ impl MockNetworkBridgeTx { } }, NetworkBridgeTxMessage::ReportPeer(_) => { - // ingore rep changes + // ignore rep changes }, NetworkBridgeTxMessage::SendValidationMessage(peers, message) => { for peer in peers { self.to_network_interface .unbounded_send(NetworkMessage::MessageFromNode( - self.test_authorithies + self.test_authorities .peer_id_to_authority .get(&peer) .unwrap() diff --git a/polkadot/node/subsystem-bench/src/lib/network.rs b/polkadot/node/subsystem-bench/src/lib/network.rs index 1bc35a014ff..0f7b7d741e7 100644 --- a/polkadot/node/subsystem-bench/src/lib/network.rs +++ b/polkadot/node/subsystem-bench/src/lib/network.rs @@ -154,7 +154,7 @@ pub enum NetworkMessage { MessageFromNode(AuthorityDiscoveryId, VersionedValidationProtocol), /// A request originating from our node RequestFromNode(AuthorityDiscoveryId, Requests), - /// A request originating from an emultated peer + /// A request originating from an emulated peer RequestFromPeer(IncomingRequest), } @@ -790,9 +790,9 @@ pub fn new_network( let connected_count = config.connected_count(); - let mut peers_indicies = (0..n_peers).collect_vec(); + let mut peers_indices = (0..n_peers).collect_vec(); let (_connected, to_disconnect) = - peers_indicies.partial_shuffle(&mut thread_rng(), connected_count); + peers_indices.partial_shuffle(&mut thread_rng(), connected_count); // Node under test is always mark as disconnected. peers[NODE_UNDER_TEST as usize].disconnect(); @@ -958,7 +958,7 @@ impl Metrics { .inc_by(bytes as u64); } - /// Increment total receioved for a peer. + /// Increment total received for a peer. pub fn on_peer_received(&self, peer_index: usize, bytes: usize) { self.peer_total_received .with_label_values(vec![format!("node{}", peer_index).as_str()].as_slice()) @@ -1041,7 +1041,7 @@ mod tests { async fn test_expected_rate() { let tick_rate = 200; let budget = 1_000_000; - // rate must not exceeed 100 credits per second + // rate must not exceed 100 credits per second let mut rate_limiter = RateLimit::new(tick_rate, budget); let mut total_sent = 0usize; let start = Instant::now(); diff --git a/polkadot/node/subsystem-bench/src/lib/utils.rs b/polkadot/node/subsystem-bench/src/lib/utils.rs new file mode 100644 index 00000000000..cd206d8f322 --- /dev/null +++ b/polkadot/node/subsystem-bench/src/lib/utils.rs @@ -0,0 +1,76 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. +//!
Test utils + +use crate::usage::BenchmarkUsage; +use std::io::{stdout, Write}; + +pub struct WarmUpOptions<'a> { + /// The maximum number of runs considered for warming up. + pub warm_up: usize, + /// The number of runs considered for benchmarking. + pub bench: usize, + /// The difference in CPU usage between runs considered as normal + pub precision: f64, + /// The subsystems whose CPU usage is checked during warm-up cycles + pub subsystems: &'a [&'a str], +} + +impl<'a> WarmUpOptions<'a> { + pub fn new(subsystems: &'a [&'a str]) -> Self { + Self { warm_up: 100, bench: 3, precision: 0.02, subsystems } + } +} + +pub fn warm_up_and_benchmark( + options: WarmUpOptions, + run: impl Fn() -> BenchmarkUsage, +) -> Result<BenchmarkUsage, String> { + println!("Warming up..."); + let mut usages = Vec::with_capacity(options.bench); + + for n in 1..=options.warm_up { + let curr = run(); + if let Some(prev) = usages.last() { + let diffs = options + .subsystems + .iter() + .map(|&v| { + curr.cpu_usage_diff(prev, v) + .ok_or(format!("{} not found in benchmark {:?}", v, prev)) + }) + .collect::<Result<Vec<f64>, String>>()?; + if !diffs.iter().all(|&v| v < options.precision) { + usages.clear(); + } + } + usages.push(curr); + print!("\r{}%", n * 100 / options.warm_up); + if usages.len() == options.bench { + println!("\rTook {} runs to warm up", n.saturating_sub(options.bench)); + break; + } + stdout().flush().unwrap(); + } + + if usages.len() != options.bench { + println!("Didn't warm up after {} runs", options.warm_up); + return Err("Can't warm up".to_string()) + } + + Ok(BenchmarkUsage::average(&usages)) +} diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index 92c35d1b7b9..5d05d2b56ed 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -805,7 +805,7 @@ pub enum StatementDistributionMessage { /// This data becomes intrinsics or extrinsics which should be included in a future relay chain /// block. -// It needs to be cloneable because multiple potential block authors can request copies. +// It needs to be clonable because multiple potential block authors can request copies. #[derive(Debug, Clone)] pub enum ProvisionableData { /// This bitfield indicates the availability of various candidate blocks. diff --git a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs index c7b91bffb3d..d38d838fede 100644 --- a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs +++ b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs @@ -78,7 +78,7 @@ /// /// 2. The root fragment is invalid under the new constraints because it has been subsumed by /// the relay-chain. In this case, we can discard the root and split & re-root the fragment -/// tree under its descendents and compare to the new constraints again. This is the +/// tree under its descendants and compare to the new constraints again. This is the /// "prediction came true" case. /// /// 3. The root fragment is invalid under the new constraints because a competing parachain diff --git a/polkadot/node/subsystem-util/src/lib.rs b/polkadot/node/subsystem-util/src/lib.rs index aaae30db50c..6ff09ed5f22 100644 --- a/polkadot/node/subsystem-util/src/lib.rs +++ b/polkadot/node/subsystem-util/src/lib.rs @@ -382,7 +382,7 @@ pub fn signing_key_and_index<'a>( /// Sign the given data with the given validator ID.
/// -/// Returns `Ok(None)` if the private key that correponds to that validator ID is not found in the +/// Returns `Ok(None)` if the private key that corresponds to that validator ID is not found in the /// given keystore. Returns an error if the key could not be used for signing. pub fn sign( keystore: &KeystorePtr, diff --git a/polkadot/primitives/src/v6/mod.rs b/polkadot/primitives/src/v6/mod.rs index 9e7f910314c..21cee753265 100644 --- a/polkadot/primitives/src/v6/mod.rs +++ b/polkadot/primitives/src/v6/mod.rs @@ -1457,7 +1457,7 @@ pub enum ValidDisputeStatementKind { #[codec(index = 3)] ApprovalChecking, /// An approval vote from the new version. - /// We can't create this version untill all nodes + /// We can't create this version until all nodes /// have been updated to support it and max_approval_coalesce_count /// is set to more than 1. #[codec(index = 4)] @@ -1604,7 +1604,7 @@ impl ValidityAttestation { pub fn to_compact_statement(&self, candidate_hash: CandidateHash) -> CompactStatement { // Explicit and implicit map directly from // `ValidityVote::Valid` and `ValidityVote::Issued`, and hence there is a - // `1:1` relationshow which enables the conversion. + // `1:1` relationship which enables the conversion. match *self { ValidityAttestation::Implicit(_) => CompactStatement::Seconded(candidate_hash), ValidityAttestation::Explicit(_) => CompactStatement::Valid(candidate_hash), diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs index bd2a5106c44..94e9e892029 100644 --- a/polkadot/primitives/src/vstaging/mod.rs +++ b/polkadot/primitives/src/vstaging/mod.rs @@ -102,7 +102,7 @@ pub struct SchedulerParams { pub on_demand_fee_variability: Perbill, /// The minimum amount needed to claim a slot in the spot pricing queue. pub on_demand_base_fee: Balance, - /// The number of blocks a claim stays in the scheduler's claimqueue before getting cleared. + /// The number of blocks a claim stays in the scheduler's claim queue before getting cleared. /// This number should go reasonably higher than the number of blocks in the async backing /// lookahead. pub ttl: BlockNumber, @@ -133,7 +133,7 @@ pub type NodeFeatures = BitVec<u8, bitvec::order::Lsb0>; /// Module containing feature-specific bit indices into the `NodeFeatures` bitvec. pub mod node_features { - /// A feature index used to indentify a bit into the node_features array stored + /// A feature index used to identify a bit into the node_features array stored /// in the HostConfiguration.
#[repr(u8)] pub enum FeatureIndex { diff --git a/polkadot/primitives/test-helpers/src/lib.rs b/polkadot/primitives/test-helpers/src/lib.rs index b7c5d82341a..d43cf3317e5 100644 --- a/polkadot/primitives/test-helpers/src/lib.rs +++ b/polkadot/primitives/test-helpers/src/lib.rs @@ -249,7 +249,7 @@ pub fn resign_candidate_descriptor_with_collator<H: AsRef<[u8]>>( descriptor.signature = signature; } -/// Extracts validators's public keus (`ValidatorId`) from `Sr25519Keyring` +/// Extracts validators' public keys (`ValidatorId`) from `Sr25519Keyring` pub fn validator_pubkeys(val_ids: &[Sr25519Keyring]) -> Vec<ValidatorId> { val_ids.iter().map(|v| v.public().into()).collect() } diff --git a/polkadot/roadmap/implementers-guide/src/disputes-flow.md b/polkadot/roadmap/implementers-guide/src/disputes-flow.md index b5cc5611c6f..540b3c45bad 100644 --- a/polkadot/roadmap/implementers-guide/src/disputes-flow.md +++ b/polkadot/roadmap/implementers-guide/src/disputes-flow.md @@ -74,7 +74,7 @@ The set of validators eligible to vote consists of the validators that had duty votes by the backing validators. If a validator receives an initial dispute message (a set of votes where there are at least two opposing votes -contained), and the PoV or Code are hence not reconstructable from local storage, that validator must request the +contained), and the PoV or Code are hence not reconstructible from local storage, that validator must request the required data from its peers. The dispute availability message must contain code, persisted validation data, and the proof of validity. diff --git a/polkadot/roadmap/implementers-guide/src/node/approval/approval-distribution.md b/polkadot/roadmap/implementers-guide/src/node/approval/approval-distribution.md index ce71de6f76b..c987b7fe5be 100644 --- a/polkadot/roadmap/implementers-guide/src/node/approval/approval-distribution.md +++ b/polkadot/roadmap/implementers-guide/src/node/approval/approval-distribution.md @@ -101,7 +101,7 @@ struct State { } enum MessageFingerprint { - Assigment(Hash, u32, ValidatorIndex), + Assignment(Hash, u32, ValidatorIndex), Approval(Hash, u32, ValidatorIndex), } @@ -203,7 +203,7 @@ For all peers: * Compute `view_intersection` as the intersection of the peer's view blocks with the hashes of the new blocks. * Invoke `unify_with_peer(peer, view_intersection)`. -#### `ApprovalDistributionMessage::DistributeAsignment` +#### `ApprovalDistributionMessage::DistributeAssignment` Call `import_and_circulate_assignment` with `MessageSource::Local`. diff --git a/polkadot/roadmap/implementers-guide/src/node/availability/availability-recovery.md b/polkadot/roadmap/implementers-guide/src/node/availability/availability-recovery.md index e3bb14db3a5..c57c4589244 100644 --- a/polkadot/roadmap/implementers-guide/src/node/availability/availability-recovery.md +++ b/polkadot/roadmap/implementers-guide/src/node/availability/availability-recovery.md @@ -115,7 +115,7 @@ On `Conclude`, shut down the subsystem. Launch the source as a background task running `run(recovery_task)`. -#### `run(recovery_task) -> Result` +#### `run(recovery_task) -> Result` ```rust // How many parallel requests to have going at once.
diff --git a/polkadot/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md b/polkadot/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md index e0738e219d1..90b29249f3e 100644 --- a/polkadot/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md +++ b/polkadot/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md @@ -76,7 +76,7 @@ dispute is raised reconsider their vote and send an explicit invalid vote. If th recorded, then they could avoid a slash. This is not a problem for our basic security assumptions: The backers are the ones to be supposed to have skin in the -game, so we are not too woried about colluding approval voters getting away slash free as the gambler's ruin property is +game, so we are not too worried about colluding approval voters getting away slash free as the gambler's ruin property is maintained anyway. There is however a separate problem, from colluding approval-voters, that is "lazy" approval voters. If it were easy and reliable for approval-voters to reconsider their vote, in case of an actual dispute, then they don't have a direct incentive (apart from playing a part in securing the network) to properly run the validation function at @@ -331,13 +331,13 @@ off-chain. The reason for this is a dispute can be raised for a candidate in a p validator that is going to be slashed for it might not even be in the current active set. That means it can't be disabled on-chain. We need a way to prevent someone from disputing all valid candidates in the previous era. We do this by keeping track of the validators who lost a dispute in the past few sessions and use that list in addition to the -on-chain disabled validators state. In addition to past session misbehavior, this also heps in case a slash is delayed. +on-chain disabled validators state. In addition to past session misbehavior, this also helps in case a slash is delayed. When we receive a dispute statements set, we do the following: 1. Take the on-chain state of disabled validators at the relay parent block. 1. Take a list of those who lost a dispute in that session in the order that prioritizes the biggest and newest offence. 1. Combine the two lists and take the first byzantine threshold validators from it. -1. If the dispute is unconfimed, check if all votes against the candidate are from disabled validators. +1. If the dispute is unconfirmed, check if all votes against the candidate are from disabled validators. If so, we don't participate in the dispute, but record the votes. ### Backing Votes @@ -591,7 +591,7 @@ Initiates processing via the `Participation` module and updates the internal sta ### On `MuxedMessage::Participation` -This message is sent from `Participatuion` module and indicates a processed dispute participation. It's the result of +This message is sent from `Participation` module and indicates a processed dispute participation. It's the result of the processing job initiated with `OverseerSignal::ActiveLeaves`. The subsystem issues a `DisputeMessage` with the result. diff --git a/polkadot/roadmap/implementers-guide/src/node/overseer.md b/polkadot/roadmap/implementers-guide/src/node/overseer.md index 53a11530810..960539b8499 100644 --- a/polkadot/roadmap/implementers-guide/src/node/overseer.md +++ b/polkadot/roadmap/implementers-guide/src/node/overseer.md @@ -108,11 +108,11 @@ way that the receiving subsystem can further address the communication to one of This communication prevents a certain class of race conditions. 
When the Overseer determines that it is time for subsystems to begin working on top of a particular relay-parent, it will dispatch a `ActiveLeavesUpdate` message to all subsystems to do so, and those messages will be handled asynchronously by those subsystems. Some subsystems will receive -those messsages before others, and it is important that a message sent by subsystem A after receiving +those messages before others, and it is important that a message sent by subsystem A after receiving `ActiveLeavesUpdate` message will arrive at subsystem B after its `ActiveLeavesUpdate` message. If subsystem A maintained an independent channel with subsystem B to communicate, it would be possible for subsystem B to handle the side message before the `ActiveLeavesUpdate` message, but it wouldn't have any logical course of action to take with the -side message - leading to it being discarded or improperly handled. Well-architectured state machines should have a +side message - leading to it being discarded or improperly handled. Well-architected state machines should have a single source of inputs, so that is what we do here. One exception is reasonable to make for responses to requests. A request should be made via the overseer in order to diff --git a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-prechecker.md b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-prechecker.md index 7f6fef7ddf6..f8e88a67fcb 100644 --- a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-prechecker.md +++ b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-prechecker.md @@ -8,7 +8,7 @@ pre-checking. Head over to [overview] for the PVF pre-checking process overview. There is no dedicated input mechanism for PVF pre-checker. Instead, PVF pre-checker looks on the `ActiveLeavesUpdate` event stream for work. -This subsytem does not produce any output messages either. The subsystem will, however, send messages to the +This subsystem does not produce any output messages either. The subsystem will, however, send messages to the [Runtime API] subsystem to query for the pending PVFs and to submit votes. In addition to that, it will also communicate with [Candidate Validation] Subsystem to request PVF pre-check. diff --git a/polkadot/roadmap/implementers-guide/src/runtime/README.md b/polkadot/roadmap/implementers-guide/src/runtime/README.md index 459f0e6b69d..10eedb49ec3 100644 --- a/polkadot/roadmap/implementers-guide/src/runtime/README.md +++ b/polkadot/roadmap/implementers-guide/src/runtime/README.md @@ -89,7 +89,7 @@ struct SessionChangeNotification { prev_config: HostConfiguration, // The configuration after handling the session change. new_config: HostConfiguration, - // A secure randomn seed for the session, gathered from BABE. + // A secure random seed for the session, gathered from BABE. random_seed: [u8; 32], // The session index of the beginning session. session_index: SessionIndex, diff --git a/polkadot/roadmap/implementers-guide/src/runtime/hrmp.md b/polkadot/roadmap/implementers-guide/src/runtime/hrmp.md index 69d33ca8670..ed765634b59 100644 --- a/polkadot/roadmap/implementers-guide/src/runtime/hrmp.md +++ b/polkadot/roadmap/implementers-guide/src/runtime/hrmp.md @@ -62,7 +62,7 @@ HRMP related storage layout HrmpOpenChannelRequests: map HrmpChannelId => Option; HrmpOpenChannelRequestsList: Vec; -/// This mapping tracks how many open channel requests are inititated by a given sender para. +/// This mapping tracks how many open channel requests are initiated by a given sender para. 
/// Invariant: `HrmpOpenChannelRequests` should contain the same number of items that has `(X, _)` /// as the number of `HrmpOpenChannelRequestCount` for `X`. HrmpOpenChannelRequestCount: map ParaId => u32; @@ -233,7 +233,7 @@ executed the message. 1. Send a downward message to the opposite party notifying about the channel closing. * The DM is sent using `queue_downward_message`. * The DM is represented by the `HrmpChannelClosing` XCM message with: - * `initator` is set to `origin`, + * `initiator` is set to `origin`, * `sender` is set to `ch.sender`, * `recipient` is set to `ch.recipient`. * The opposite party is `ch.sender` if `origin` is `ch.recipient` and `ch.recipient` if `origin` is `ch.sender`. diff --git a/polkadot/roadmap/implementers-guide/src/runtime/parainherent.md b/polkadot/roadmap/implementers-guide/src/runtime/parainherent.md index 1345f0eea95..7972c706b9e 100644 --- a/polkadot/roadmap/implementers-guide/src/runtime/parainherent.md +++ b/polkadot/roadmap/implementers-guide/src/runtime/parainherent.md @@ -88,7 +88,7 @@ to `sanitize_bitfields` function for implementation details. Backed candidates sanitization removes malformed ones, candidates which have got concluded invalid disputes against them or candidates produced by unassigned cores. Furthermore any backing votes from disabled validators for a candidate are dropped. This is part of the validator disabling strategy. After filtering the statements from disabled validators a -backed candidate may end up with votes count less than `minimum_backing_votes` (a parameter from `HostConfiguiration`). +backed candidate may end up with votes count less than `minimum_backing_votes` (a parameter from `HostConfiguration`). In this case the whole candidate is dropped otherwise it will be rejected by `process_candidates` from pallet inclusion. All checks related to backed candidates are implemented in `sanitize_backed_candidates` and `filter_backed_statements_from_disabled_validators`. diff --git a/polkadot/roadmap/implementers-guide/src/types/approval.md b/polkadot/roadmap/implementers-guide/src/types/approval.md index c19ffa53762..29d973ca0ab 100644 --- a/polkadot/roadmap/implementers-guide/src/types/approval.md +++ b/polkadot/roadmap/implementers-guide/src/types/approval.md @@ -39,7 +39,7 @@ enum AssignmentCertKindV2 { /// The core index chosen in this cert. core_index: CoreIndex, }, - /// Deprectated assignment. Soon to be removed. + /// Deprecated assignment. Soon to be removed. /// /// An assignment story based on the VRF that authorized the relay-chain block where the /// candidate was included combined with a sample number. @@ -117,7 +117,7 @@ struct IndirectSignedApprovalVote { ## `CheckedAssignmentCert` An assignment cert which has checked both the VRF and the validity of the implied assignment according to the selection -criteria rules of the protocol. This type should be declared in such a way as to be instantiatable only when the checks +criteria rules of the protocol. This type should be declared in such a way as to be instantiable only when the checks have actually been done. Fields should be accessible via getters, not direct struct access. 
```rust diff --git a/polkadot/roadmap/implementers-guide/src/types/disputes.md b/polkadot/roadmap/implementers-guide/src/types/disputes.md index c49e0fea262..ac09084c48a 100644 --- a/polkadot/roadmap/implementers-guide/src/types/disputes.md +++ b/polkadot/roadmap/implementers-guide/src/types/disputes.md @@ -82,7 +82,7 @@ struct DisputeState { struct ScrapedOnChainVotes { /// The session index at which the block was included. session: SessionIndex, - /// The backing and seconding validity attestations for all candidates, provigind the full candidate receipt. + /// The backing and seconding validity attestations for all candidates, providing the full candidate receipt. backing_validators_per_candidate: Vec<(CandidateReceipt, Vec<(ValidatorIndex, ValidityAttestation)>)> /// Set of concluded disputes that were recorded /// on chain within the inherent. diff --git a/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md b/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md index acfe309ba7b..e011afb9708 100644 --- a/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md +++ b/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md @@ -22,7 +22,7 @@ All subsystems have their own message types; all of them need to be able to list are currently two proposals for how to handle that with unified communication channels: 1. Retaining the `OverseerSignal` definition above, add `enum FromOrchestra<T> {Signal(OverseerSignal), Message(T)}`. -1. Add a generic varint to `OverseerSignal`: `Message(T)`. +1. Add a generic variant to `OverseerSignal`: `Message(T)`. Either way, there will be some top-level type encapsulating messages from the overseer to each subsystem. diff --git a/polkadot/roadmap/phase-1.toml b/polkadot/roadmap/phase-1.toml index 3a5f0d752de..9b9374d234b 100644 --- a/polkadot/roadmap/phase-1.toml +++ b/polkadot/roadmap/phase-1.toml @@ -34,7 +34,7 @@ requires = ["two-phase-inclusion"] items = [ { label = "Submit secondary checks to runtime", port = "submitsecondary", requires = ["secondary-checking"] }, { label = "Track all candidates within the slash period as well as their session" }, - { label = "Track reports and attestatations for candidates" }, + { label = "Track reports and attestations for candidates" }, ] [[group]] diff --git a/polkadot/runtime/common/src/xcm_sender.rs b/polkadot/runtime/common/src/xcm_sender.rs index 46d09afcfb2..4bbf6a14a40 100644 --- a/polkadot/runtime/common/src/xcm_sender.rs +++ b/polkadot/runtime/common/src/xcm_sender.rs @@ -139,7 +139,7 @@ where /// Implementation of `xcm_builder::EnsureDelivery` which helps to ensure delivery to the /// `ParaId` parachain (sibling or child). Deposits existential deposit for origin (if needed). /// Deposits estimated fee to the origin account (if needed). -/// Allows to trigger additional logic for specific `ParaId` (e.g. open HRMP channel) (if neeeded). +/// Allows to trigger additional logic for specific `ParaId` (e.g. open HRMP channel) (if needed).
#[cfg(feature = "runtime-benchmarks")] pub struct ToParachainDeliveryHelper< XcmConfig, diff --git a/polkadot/runtime/parachains/src/assigner_coretime/tests.rs b/polkadot/runtime/parachains/src/assigner_coretime/tests.rs index 998e39670f9..5d42a9d0c8e 100644 --- a/polkadot/runtime/parachains/src/assigner_coretime/tests.rs +++ b/polkadot/runtime/parachains/src/assigner_coretime/tests.rs @@ -622,7 +622,7 @@ fn assignment_proportions_in_core_state_work() { ); } - // Case 2: Current assignment remaning < step after pop + // Case 2: Current assignment remaining < step after pop { assert_eq!( CoretimeAssigner::pop_assignment_for_core(core_idx), diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs index c47c8745e65..26951f34252 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs @@ -684,7 +684,7 @@ where /// Adds an order to the on demand queue. /// - /// Paramenters: + /// Parameters: /// - `location`: Whether to push this entry to the back or the front of the queue. Pushing an /// entry to the front of the queue is only used when the scheduler wants to push back an /// entry it has already popped. @@ -763,7 +763,7 @@ where /// Increases the affinity of a `ParaId` to a specified `CoreIndex`. /// Adds to the count of the `CoreAffinityCount` if an entry is found and the core_index - /// matches. A non-existant entry will be initialized with a count of 1 and uses the supplied + /// matches. A non-existent entry will be initialized with a count of 1 and uses the supplied /// `CoreIndex`. fn increase_affinity(para_id: ParaId, core_index: CoreIndex) { ParaIdAffinity::::mutate(para_id, |maybe_affinity| match maybe_affinity { diff --git a/polkadot/runtime/parachains/src/coretime/migration.rs b/polkadot/runtime/parachains/src/coretime/migration.rs index 03fecc58570..193a5e46b99 100644 --- a/polkadot/runtime/parachains/src/coretime/migration.rs +++ b/polkadot/runtime/parachains/src/coretime/migration.rs @@ -238,7 +238,7 @@ mod v_coretime { return None }, }; - // We assume the coretime chain set this parameter to the recommened value in RFC-1: + // We assume the coretime chain set this parameter to the recommended value in RFC-1: const TIME_SLICE_PERIOD: u32 = 80; let round_up = if valid_until % TIME_SLICE_PERIOD > 0 { 1 } else { 0 }; let time_slice = valid_until / TIME_SLICE_PERIOD + TIME_SLICE_PERIOD * round_up; diff --git a/polkadot/runtime/parachains/src/disputes.rs b/polkadot/runtime/parachains/src/disputes.rs index da95b060c14..cffad42e0ec 100644 --- a/polkadot/runtime/parachains/src/disputes.rs +++ b/polkadot/runtime/parachains/src/disputes.rs @@ -181,7 +181,7 @@ pub trait DisputesHandler { fn is_frozen() -> bool; /// Remove dispute statement duplicates and sort the non-duplicates based on - /// local (lower indicies) vs remotes (higher indices) and age (older with lower indices). + /// local (lower indices) vs remotes (higher indices) and age (older with lower indices). /// /// Returns `Ok(())` if no duplicates were present, `Err(())` otherwise. /// diff --git a/polkadot/runtime/parachains/src/disputes/slashing.rs b/polkadot/runtime/parachains/src/disputes/slashing.rs index 9b2b7a48dc8..9f8fa123918 100644 --- a/polkadot/runtime/parachains/src/disputes/slashing.rs +++ b/polkadot/runtime/parachains/src/disputes/slashing.rs @@ -643,7 +643,7 @@ fn is_known_offence( } } -/// Actual `HandleReports` implemention. 
+/// Actual `HandleReports` implementation. /// /// When configured properly, should be instantiated with /// `T::KeyOwnerIdentification, Offences, ReportLongevity` parameters. diff --git a/polkadot/runtime/parachains/src/hrmp.rs b/polkadot/runtime/parachains/src/hrmp.rs index 42592d9d9f1..d62533dc919 100644 --- a/polkadot/runtime/parachains/src/hrmp.rs +++ b/polkadot/runtime/parachains/src/hrmp.rs @@ -229,12 +229,12 @@ impl fmt::Debug for OutboundHrmpAcceptanceErr { ), TotalSizeExceeded { idx, total_size, limit } => write!( fmt, - "sending the HRMP message at index {} would exceed the neogitiated channel total size ({} > {})", + "sending the HRMP message at index {} would exceed the negotiated channel total size ({} > {})", idx, total_size, limit, ), CapacityExceeded { idx, count, limit } => write!( fmt, - "sending the HRMP message at index {} would exceed the neogitiated channel capacity ({} > {})", + "sending the HRMP message at index {} would exceed the negotiated channel capacity ({} > {})", idx, count, limit, ), } @@ -790,7 +790,7 @@ pub mod pallet { .ok_or(ArithmeticError::Underflow)?; T::Currency::unreserve( &channel_id.sender.into_account_truncating(), - // The difference should always be convertable into `Balance`, but be + // The difference should always be convertible into `Balance`, but be // paranoid and do nothing in case. amount.try_into().unwrap_or(Zero::zero()), ); diff --git a/polkadot/runtime/parachains/src/hrmp/tests.rs b/polkadot/runtime/parachains/src/hrmp/tests.rs index 7e7b67c8059..162c1412160 100644 --- a/polkadot/runtime/parachains/src/hrmp/tests.rs +++ b/polkadot/runtime/parachains/src/hrmp/tests.rs @@ -702,7 +702,7 @@ fn verify_externally_accessible() { sp_io::storage::get(&well_known_keys::hrmp_ingress_channel_index(para_b)) .expect("the ingress index must be present for para_b"); let ingress_index = >::decode(&mut &raw_ingress_index[..]) - .expect("ingress indexx should be decodable as a list of para ids"); + .expect("ingress index should be decodable as a list of para ids"); assert_eq!(ingress_index, vec![para_a]); // Now, verify that we can access and decode the egress index. diff --git a/polkadot/runtime/parachains/src/paras/mod.rs b/polkadot/runtime/parachains/src/paras/mod.rs index 5acdb9683bb..017cd87f13b 100644 --- a/polkadot/runtime/parachains/src/paras/mod.rs +++ b/polkadot/runtime/parachains/src/paras/mod.rs @@ -1756,7 +1756,7 @@ impl Pallet { // // - Empty value is treated as the current code is already inserted during the onboarding. // - // This is only an intermediate solution and should be fixed in foreseable future. + // This is only an intermediate solution and should be fixed in foreseeable future. // // [soaking issue]: https://github.com/paritytech/polkadot/issues/3918 let validation_code = diff --git a/polkadot/runtime/parachains/src/paras/tests.rs b/polkadot/runtime/parachains/src/paras/tests.rs index 262ec9d3fdb..39abd2367b7 100644 --- a/polkadot/runtime/parachains/src/paras/tests.rs +++ b/polkadot/runtime/parachains/src/paras/tests.rs @@ -1210,7 +1210,7 @@ fn code_hash_at_returns_up_to_end_of_code_retention_period() { assert_eq!(Paras::past_code_meta(¶_id).upgrade_times, vec![upgrade_at(4, 10)]); assert_eq!(Paras::current_code(¶_id), Some(new_code.clone())); - // Make sure that the old code is available **before** the code retion period passes. + // Make sure that the old code is available **before** the code retention period passes. 
run_to_block(10 + code_retention_period, None); assert_eq!(Paras::code_by_hash(&old_code.hash()), Some(old_code.clone())); assert_eq!(Paras::code_by_hash(&new_code.hash()), Some(new_code.clone())); @@ -1717,7 +1717,7 @@ fn poke_unused_validation_code_doesnt_remove_code_with_users() { #[test] fn increase_code_ref_doesnt_have_allergy_on_add_trusted_validation_code() { - // Verify that accidential calling of increase_code_ref or decrease_code_ref does not lead + // Verify that accidental calling of increase_code_ref or decrease_code_ref does not lead // to a disaster. // NOTE that this test is extra paranoid, as it is not really possible to hit // `decrease_code_ref` without calling `increase_code_ref` first. diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 02ddfd0acca..6a20a10a8d7 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -733,7 +733,7 @@ fn random_sel Weight>( /// are preferred. And for disputes, local and older disputes are preferred (see /// `limit_and_sanitize_disputes`). for backed candidates, since with a increasing number of /// parachains their chances of inclusion become slim. All backed candidates are checked -/// beforehands in `fn create_inherent_inner` which guarantees sanity. +/// beforehand in `fn create_inherent_inner` which guarantees sanity. /// /// Assumes disputes are already filtered by the time this is called. /// @@ -1070,7 +1070,7 @@ fn limit_and_sanitize_disputes< log::debug!(target: LOG_TARGET, "Above max consumable weight: {}/{}", disputes_weight, max_consumable_weight); let mut checked_acc = Vec::::with_capacity(disputes.len()); - // Accumualated weight of all disputes picked, that passed the checks. + // Accumulated weight of all disputes picked, that passed the checks. let mut weight_acc = Weight::zero(); // Select disputes in-order until the remaining weight is attained diff --git a/polkadot/runtime/parachains/src/scheduler/migration.rs b/polkadot/runtime/parachains/src/scheduler/migration.rs index 4c0a07d7367..c47fbab046f 100644 --- a/polkadot/runtime/parachains/src/scheduler/migration.rs +++ b/polkadot/runtime/parachains/src/scheduler/migration.rs @@ -24,7 +24,7 @@ use frame_support::{ /// Old/legacy assignment representation (v0). /// -/// `Assignment` used to be a concrete type with the same layout V0Assignment, idential on all +/// `Assignment` used to be a concrete type with the same layout V0Assignment, identical on all /// assignment providers. This can be removed once storage has been migrated. #[derive(Encode, Decode, RuntimeDebug, TypeInfo, PartialEq, Clone)] struct V0Assignment { diff --git a/polkadot/runtime/parachains/src/scheduler/tests.rs b/polkadot/runtime/parachains/src/scheduler/tests.rs index e6a1e66b3a5..720f645e3bd 100644 --- a/polkadot/runtime/parachains/src/scheduler/tests.rs +++ b/polkadot/runtime/parachains/src/scheduler/tests.rs @@ -836,7 +836,7 @@ fn on_demand_claims_are_pruned_after_timing_out() { // #26 now += 1; - // Run to block #n but this time have group 1 conclude the availabilty. + // Run to block #n but this time have group 1 conclude the availability. 
for n in now..=(now + max_timeouts + 1) { // #n run_to_block(n, |_| None); diff --git a/polkadot/runtime/parachains/src/session_info.rs b/polkadot/runtime/parachains/src/session_info.rs index 9e1b3d05842..4ca5a73d42b 100644 --- a/polkadot/runtime/parachains/src/session_info.rs +++ b/polkadot/runtime/parachains/src/session_info.rs @@ -158,7 +158,7 @@ impl<T: Config> Pallet<T> { for idx in old_earliest_stored_session..new_earliest_stored_session { Sessions::<T>::remove(&idx); // Idx will be missing for a few sessions after the runtime upgrade. - // But it shouldn'be be a problem. + // But it shouldn't be a problem. AccountKeys::<T>::remove(&idx); SessionExecutorParams::<T>::remove(&idx); } diff --git a/polkadot/runtime/parachains/src/ump_tests.rs b/polkadot/runtime/parachains/src/ump_tests.rs index 2ed1d64336b..5867a8fca66 100644 --- a/polkadot/runtime/parachains/src/ump_tests.rs +++ b/polkadot/runtime/parachains/src/ump_tests.rs @@ -456,7 +456,7 @@ fn verify_relay_dispatch_queue_size_is_externally_accessible() { fn assert_queue_size(para: ParaId, count: u32, size: u32) { #[allow(deprecated)] let raw_queue_size = sp_io::storage::get(&well_known_keys::relay_dispatch_queue_size(para)).expect( - "enqueing a message should create the dispatch queue\ + "enqueuing a message should create the dispatch queue\ and it should be accessible via the well known keys", ); let (c, s) = <(u32, u32)>::decode(&mut &raw_queue_size[..]) @@ -466,7 +466,7 @@ fn assert_queue_size(para: ParaId, count: u32, size: u32) { // Test the deprecated but at least type-safe `relay_dispatch_queue_size_typed`: #[allow(deprecated)] let (c, s) = well_known_keys::relay_dispatch_queue_size_typed(para).get().expect( - "enqueing a message should create the dispatch queue\ + "enqueuing a message should create the dispatch queue\ and it should be accessible via the well known keys", ); assert_eq!((c, s), (count, size)); diff --git a/polkadot/runtime/rococo/README.md b/polkadot/runtime/rococo/README.md index 5b2c296f0ce..c19c3654fe4 100644 --- a/polkadot/runtime/rococo/README.md +++ b/polkadot/runtime/rococo/README.md @@ -4,7 +4,7 @@ Rococo is a testnet runtime with no stability guarantees. ## How to build `rococo` runtime `EpochDurationInBlocks` parameter is configurable via `ROCOCO_EPOCH_DURATION` environment variable. To build wasm -runtime blob with customized epoch duration the following command shall be exectuted: +runtime blob with customized epoch duration the following command shall be executed: ```bash ROCOCO_EPOCH_DURATION=10 ./polkadot/scripts/build-only-wasm.sh rococo-runtime /path/to/output/directory/ ``` diff --git a/polkadot/runtime/westend/src/weights/xcm/mod.rs b/polkadot/runtime/westend/src/weights/xcm/mod.rs index 0162012825f..09e883a9f7a 100644 --- a/polkadot/runtime/westend/src/weights/xcm/mod.rs +++ b/polkadot/runtime/westend/src/weights/xcm/mod.rs @@ -142,7 +142,7 @@ impl XcmWeightInfo for WestendXcmWeight { fn descend_origin(_who: &InteriorLocation) -> Weight { XcmGeneric::<Runtime>::descend_origin() } - fn report_error(_query_repsonse_info: &QueryResponseInfo) -> Weight { + fn report_error(_query_response_info: &QueryResponseInfo) -> Weight { XcmGeneric::<Runtime>::report_error() } diff --git a/polkadot/tests/common.rs b/polkadot/tests/common.rs index 15721c990e0..dbee2d36503 100644 --- a/polkadot/tests/common.rs +++ b/polkadot/tests/common.rs @@ -48,8 +48,8 @@ pub async fn wait_n_finalized_blocks(n: usize, url: &str) { /// Read the WS address from the output.
/// -/// This is hack to get the actual binded sockaddr because -/// polkadot assigns a random port if the specified port was already binded. +/// This is hack to get the actual bound sockaddr because +/// polkadot assigns a random port if the specified port was already bound. /// /// You must call /// `Command::new("cmd").stdout(process::Stdio::piped()).stderr(process::Stdio::piped())` diff --git a/polkadot/tests/running_the_node_and_interrupt.rs b/polkadot/tests/running_the_node_and_interrupt.rs index 079c34e0421..85c073d3023 100644 --- a/polkadot/tests/running_the_node_and_interrupt.rs +++ b/polkadot/tests/running_the_node_and_interrupt.rs @@ -32,7 +32,7 @@ async fn running_the_node_works_and_can_be_interrupted() { }; async fn run_command_and_kill(signal: Signal) { - let tmpdir = tempdir().expect("coult not create temp dir"); + let tmpdir = tempdir().expect("could not create temp dir"); let mut cmd = Command::new(cargo_bin("polkadot")) .stdout(process::Stdio::piped()) diff --git a/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs index 7752d1355cd..92d23cfd281 100644 --- a/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs +++ b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs @@ -248,7 +248,7 @@ fn reserve_transfer_assets_with_paid_router_works() { pub(crate) fn set_up_foreign_asset( reserve_para_id: u32, inner_junction: Option, - benficiary: AccountId, + beneficiary: AccountId, initial_amount: u128, is_sufficient: bool, ) -> (Location, AccountId, Location) { @@ -276,7 +276,7 @@ pub(crate) fn set_up_foreign_asset( assert_ok!(AssetsPallet::mint( RuntimeOrigin::signed(BOB), foreign_asset_id_location.clone(), - benficiary, + beneficiary, initial_amount )); diff --git a/polkadot/xcm/src/v2/multilocation.rs b/polkadot/xcm/src/v2/multilocation.rs index 60aa1f6cead..ac98da8d08c 100644 --- a/polkadot/xcm/src/v2/multilocation.rs +++ b/polkadot/xcm/src/v2/multilocation.rs @@ -176,7 +176,7 @@ impl MultiLocation { } /// Consumes `self` and returns a `MultiLocation` suffixed with `new`, or an `Err` with - /// theoriginal value of `self` in case of overflow. + /// the original value of `self` in case of overflow. pub fn pushed_with_interior(self, new: Junction) -> result::Result { match self.interior.pushed_with(new) { Ok(i) => Ok(MultiLocation { interior: i, parents: self.parents }), diff --git a/polkadot/xcm/src/v3/multilocation.rs b/polkadot/xcm/src/v3/multilocation.rs index c588b924ac7..18fe01ec8fa 100644 --- a/polkadot/xcm/src/v3/multilocation.rs +++ b/polkadot/xcm/src/v3/multilocation.rs @@ -205,7 +205,7 @@ impl MultiLocation { } /// Consumes `self` and returns a `MultiLocation` suffixed with `new`, or an `Err` with - /// theoriginal value of `self` in case of overflow. + /// the original value of `self` in case of overflow. pub fn pushed_with_interior( self, new: impl Into, diff --git a/polkadot/xcm/src/v4/location.rs b/polkadot/xcm/src/v4/location.rs index db55c3d3034..0e7c69864fa 100644 --- a/polkadot/xcm/src/v4/location.rs +++ b/polkadot/xcm/src/v4/location.rs @@ -210,7 +210,7 @@ impl Location { } /// Consumes `self` and returns a `Location` suffixed with `new`, or an `Err` with - /// theoriginal value of `self` in case of overflow. + /// the original value of `self` in case of overflow. 
pub fn pushed_with_interior( self, new: impl Into, diff --git a/polkadot/xcm/xcm-builder/src/barriers.rs b/polkadot/xcm/xcm-builder/src/barriers.rs index 80411ab5a22..b8923a4d5c6 100644 --- a/polkadot/xcm/xcm-builder/src/barriers.rs +++ b/polkadot/xcm/xcm-builder/src/barriers.rs @@ -142,7 +142,7 @@ impl> ShouldExecute for AllowTopLevelPaidExecutionFrom /// In the above example, `AllowUnpaidExecutionFrom` appears once underneath /// `WithComputedOrigin`. This is in order to distinguish between messages which are notionally /// from a derivative location of `ParentLocation` but that just happened to be sent via -/// `ParentLocaction` rather than messages that were sent by the parent. +/// `ParentLocation` rather than messages that were sent by the parent. /// /// Similarly `AllowTopLevelPaidExecutionFrom` appears twice: once inside of `WithComputedOrigin` /// where we provide the list of origins which are derivative origins, and then secondly outside diff --git a/polkadot/xcm/xcm-builder/src/tests/origins.rs b/polkadot/xcm/xcm-builder/src/tests/origins.rs index c717d1e2af8..b6a1a9f1052 100644 --- a/polkadot/xcm/xcm-builder/src/tests/origins.rs +++ b/polkadot/xcm/xcm-builder/src/tests/origins.rs @@ -80,8 +80,8 @@ fn universal_origin_should_work() { fn export_message_should_work() { // Bridge chain (assumed to be Relay) lets Parachain #1 have message execution for free. AllowUnpaidFrom::set(vec![[Parachain(1)].into()]); - // Local parachain #1 issues a transfer asset on Polkadot Relay-chain, transfering 100 Planck to - // Polkadot parachain #2. + // Local parachain #1 issues a transfer asset on Polkadot Relay-chain, transferring 100 Planck + // to Polkadot parachain #2. let expected_message = Xcm(vec![TransferAsset { assets: (Here, 100u128).into(), beneficiary: Parachain(2).into(), diff --git a/polkadot/xcm/xcm-builder/tests/scenarios.rs b/polkadot/xcm/xcm-builder/tests/scenarios.rs index db37f85acdb..ee1aeffbb4e 100644 --- a/polkadot/xcm/xcm-builder/tests/scenarios.rs +++ b/polkadot/xcm/xcm-builder/tests/scenarios.rs @@ -132,7 +132,7 @@ fn report_holding_works() { assets: AllCounted(1).into(), beneficiary: OnlyChild.into(), // invalid destination }, - // is not triggered becasue the deposit fails + // is not triggered because the deposit fails ReportHolding { response_info: response_info.clone(), assets: All.into() }, ]); let mut hash = fake_message_hash(&message); diff --git a/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs b/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs index da7fc0d9782..1d1ee40d092 100644 --- a/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs +++ b/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs @@ -328,7 +328,7 @@ fn query_response_elicits_handler() { /// Simulates a cross-chain message from Parachain to Parachain through Relay Chain /// that deposits assets into the reserve of the destination. 
-/// Regression test for `DepostiReserveAsset` changes in +/// Regression test for `DepositReserveAsset` changes in /// #[test] fn deposit_reserve_asset_works_for_any_xcm_sender() { diff --git a/polkadot/xcm/xcm-executor/src/traits/should_execute.rs b/polkadot/xcm/xcm-executor/src/traits/should_execute.rs index 449e82b5a6e..12e8fd6b87f 100644 --- a/polkadot/xcm/xcm-executor/src/traits/should_execute.rs +++ b/polkadot/xcm/xcm-executor/src/traits/should_execute.rs @@ -18,7 +18,7 @@ use frame_support::traits::ProcessMessageError; use sp_std::result::Result; use xcm::latest::{Instruction, Location, Weight, XcmHash}; -/// Properyies of an XCM message and its imminent execution. +/// Properties of an XCM message and its imminent execution. #[derive(Clone, Eq, PartialEq, Debug)] pub struct Properties { /// The amount of weight that the system has determined this diff --git a/polkadot/xcm/xcm-simulator/example/src/lib.rs b/polkadot/xcm/xcm-simulator/example/src/lib.rs index d134957fbc1..13210179e91 100644 --- a/polkadot/xcm/xcm-simulator/example/src/lib.rs +++ b/polkadot/xcm/xcm-simulator/example/src/lib.rs @@ -424,7 +424,7 @@ mod tests { /// Scenario: /// The relay-chain transfers an NFT into a parachain's sovereign account, who then mints a - /// trustless-backed-derivated locally. + /// trustless-backed-derived locally. /// /// Asserts that the parachain accounts are updated as expected. #[test] @@ -479,7 +479,7 @@ mod tests { assert_ok!(ParachainPalletXcm::send_xcm(alice, Parent, message)); }); ParaA::execute_with(|| { - log::debug!(target: "xcm-exceutor", "Hello"); + log::debug!(target: "xcm-executor", "Hello"); assert_eq!( parachain::ForeignUniques::owner((Parent, GeneralIndex(2)).into(), 69u32.into()), Some(ALICE), diff --git a/prdoc/pr_3808.prdoc b/prdoc/pr_3808.prdoc new file mode 100644 index 00000000000..9b50f721df3 --- /dev/null +++ b/prdoc/pr_3808.prdoc @@ -0,0 +1,20 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Fix spelling mistakes in source code + +doc: + - audience: Node Operator + description: | + Some spelling mistakes in log output, error messages and tracing (prometheus/grafana) have been fixed. + - audience: Runtime Dev + description: | + Public crate changes: + - The public trait `RuntimeParameterStore` in `substrate/frame/support` had the associated type renamed from `AggregratedKeyValue` to `AggregatedKeyValue`. + - The public trait `AggregratedKeyValue` in `substrate/frame/support` was similarly renamed to `AggregatedKeyValue`. + - The public methods `test_versioning` and `test_versioning_register_only` of the `TestApi` trait in `substrate/primitives/runtime-interface/test-wasm` had the spelling of `versionning` changed to `versioning`. + - The public functions `read_trie_first_descendant_value` and `read_child_trie_first_descendant_value` in `substrate/primitives/trie` had the spelling of `descedant` changed to `descendant`. 
+crates: + - name: frame-support + - name: sp-runtime-interface-test-wasm + - name: sp-trie diff --git a/scripts/bridges_update_subtree.sh b/scripts/bridges_update_subtree.sh index 5c5c7a322a1..2cd6d968d2b 100755 --- a/scripts/bridges_update_subtree.sh +++ b/scripts/bridges_update_subtree.sh @@ -1,6 +1,6 @@ #!/bin/bash -# A script to udpate bridges repo as subtree to Cumulus +# A script to update bridges repo as subtree to Cumulus # Usage: # ./scripts/bridges_update_subtree.sh fetch # ./scripts/bridges_update_subtree.sh patch diff --git a/scripts/snowbridge_update_subtree.sh b/scripts/snowbridge_update_subtree.sh index 2276bb35469..c572eaa18fa 100755 --- a/scripts/snowbridge_update_subtree.sh +++ b/scripts/snowbridge_update_subtree.sh @@ -1,6 +1,6 @@ #!/bin/bash -# A script to udpate bridges repo as subtree to Cumulus +# A script to update bridges repo as subtree to Cumulus # Usage: # ./scripts/update_subtree_snowbridge.sh fetch # ./scripts/update_subtree_snowbridge.sh patch diff --git a/substrate/bin/node/cli/benches/block_production.rs b/substrate/bin/node/cli/benches/block_production.rs index 23a62cc0bd2..d04780d5f95 100644 --- a/substrate/bin/node/cli/benches/block_production.rs +++ b/substrate/bin/node/cli/benches/block_production.rs @@ -145,7 +145,7 @@ fn prepare_benchmark(client: &FullClient) -> (usize, Vec) { let src = Sr25519Keyring::Alice.pair(); let dst: MultiAddress = Sr25519Keyring::Bob.to_account_id().into(); - // Add as many tranfer extrinsics as possible into a single block. + // Add as many transfer extrinsics as possible into a single block. for nonce in 0.. { let extrinsic: OpaqueExtrinsic = create_extrinsic( client, @@ -179,7 +179,7 @@ fn block_production(c: &mut Criterion) { let node = new_node(tokio_handle.clone()); let client = &*node.client; - // Buliding the very first block is around ~30x slower than any subsequent one, + // Building the very first block is around ~30x slower than any subsequent one, // so let's make sure it's built and imported before we benchmark anything. let mut block_builder = BlockBuilderBuilder::new(client) .on_parent_block(client.chain_info().best_hash) diff --git a/substrate/bin/node/cli/src/cli.rs b/substrate/bin/node/cli/src/cli.rs index 3345afae4fd..56fbed51f8a 100644 --- a/substrate/bin/node/cli/src/cli.rs +++ b/substrate/bin/node/cli/src/cli.rs @@ -49,7 +49,7 @@ pub struct Cli { /// Possible subcommands of the main binary. #[derive(Debug, clap::Subcommand)] pub enum Subcommand { - /// The custom inspect subcommmand for decoding blocks and extrinsics. + /// The custom inspect subcommand for decoding blocks and extrinsics. #[command( name = "inspect", about = "Decode given block or extrinsic using current native runtime." diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs index e4b425e6f96..dddb261a71d 100644 --- a/substrate/bin/node/cli/src/service.rs +++ b/substrate/bin/node/cli/src/service.rs @@ -53,7 +53,7 @@ pub type HostFunctions = ( frame_benchmarking::benchmarking::HostFunctions, ); -/// A specialized `WasmExecutor` intended to use accross substrate node. It provides all required +/// A specialized `WasmExecutor` intended to use across substrate node. It provides all required /// HostFunctions. 
pub type RuntimeExecutor = sc_executor::WasmExecutor;
diff --git a/substrate/bin/node/cli/tests/fees.rs b/substrate/bin/node/cli/tests/fees.rs
index 8c7b3c87315..69c96bf63a6 100644
--- a/substrate/bin/node/cli/tests/fees.rs
+++ b/substrate/bin/node/cli/tests/fees.rs
@@ -135,7 +135,7 @@ fn transaction_fee_is_correct() {
 	// if weight of the cheapest weight would be 10^7, this would be 10^9, which is:
 	//   - 1 MILLICENTS in substrate node.
 	//   - 1 milli-dot based on current polkadot runtime.
-	// (this baed on assigning 0.1 CENT to the cheapest tx with `weight = 100`)
+	// (this is based on assigning 0.1 CENT to the cheapest tx with `weight = 100`)
 	let mut t = new_test_ext(compact_code_unwrap());
 	t.insert(>::hashed_key_for(alice()), new_account_info(100));
 	t.insert(>::hashed_key_for(bob()), new_account_info(10));
diff --git a/substrate/bin/node/testing/src/bench.rs b/substrate/bin/node/testing/src/bench.rs
index df302a6453b..e5c2563905e 100644
--- a/substrate/bin/node/testing/src/bench.rs
+++ b/substrate/bin/node/testing/src/bench.rs
@@ -84,7 +84,7 @@ impl BenchPair {
 
 /// Drop system cache.
 ///
-/// Will panic if cache drop is impossbile.
+/// Will panic if cache drop is impossible.
 pub fn drop_system_cache() {
 	#[cfg(target_os = "windows")]
 	{
@@ -173,7 +173,7 @@ impl Clone for BenchDb {
 
 		// We clear system cache after db clone but before any warmups.
 		// This populates system cache with some data unrelated to actual
-		// data we will be quering further under benchmark (like what
+		// data we will be querying further under benchmark (like what
 		// would have happened in real system that queries random entries
 		// from database).
 		drop_system_cache();
@@ -443,7 +443,7 @@ impl BenchDb {
 		BlockContentIterator::new(content, &self.keyring, client)
 	}
 
-	/// Get cliet for this database operations.
+	/// Get a client for this database's operations.
 	pub fn client(&mut self) -> Client {
 		let (client, _backend, _task_executor) =
 			Self::bench_client(self.database_type, self.directory_guard.path(), &self.keyring);
diff --git a/substrate/bin/utils/chain-spec-builder/src/lib.rs b/substrate/bin/utils/chain-spec-builder/src/lib.rs
index 8c78030c885..dbd0437921f 100644
--- a/substrate/bin/utils/chain-spec-builder/src/lib.rs
+++ b/substrate/bin/utils/chain-spec-builder/src/lib.rs
@@ -185,7 +185,7 @@ pub struct ConvertToRawCmd {
 /// Verifies the provided input chain spec.
 ///
 /// Silently checks if given input chain spec can be converted to raw. It allows to check if all
-/// RuntimeGenesisConfig fiels are properly initialized and if the json does not contain invalid
+/// RuntimeGenesisConfig fields are properly initialized and if the json does not contain invalid
 /// fields.
 #[derive(Parser, Debug, Clone)]
 pub struct VerifyCmd {
diff --git a/substrate/bin/utils/subkey/README.md b/substrate/bin/utils/subkey/README.md
index a5f27cfd370..fc1053e232d 100644
--- a/substrate/bin/utils/subkey/README.md
+++ b/substrate/bin/utils/subkey/README.md
@@ -77,7 +77,7 @@ below can be derived from those secrets.
 The output above also show the **public key** and the **Account ID**. Those are the independent from the network where
 you will use the key.
 
-The **SS58 address** (or **Public Address**) of a new account is a reprensentation of the public keys of an account for
+The **SS58 address** (or **Public Address**) of a new account is a representation of the public keys of an account for
 a given network (for instance Kusama or Polkadot).
You can read more about the [SS58 format in the Substrate Docs](https://docs.substrate.io/reference/address-formats/)

@@ -143,7 +143,7 @@ Secret phrase `soup lyrics media market way crouch elevator put moon useful ques
 SS58 Address: 5He5pZpc7AJ8evPuab37vJF6KkFDqq9uDq2WXh877Qw6iaVC
 ```
 
-Using the `inspect` command (see more details below), we see that knowning only the **secret seed** is no longer
+Using the `inspect` command (see more details below), we see that knowing only the **secret seed** is no longer
 sufficient to recover the account:
 
 ```bash
diff --git a/substrate/bin/utils/subkey/src/lib.rs b/substrate/bin/utils/subkey/src/lib.rs
index f3023acde40..33f28ef46a5 100644
--- a/substrate/bin/utils/subkey/src/lib.rs
+++ b/substrate/bin/utils/subkey/src/lib.rs
@@ -94,10 +94,10 @@
 //! seed** (also called **Private Key**). Those 2 secrets are the pieces of information you MUST
 //! keep safe and secret. All the other information below can be derived from those secrets.
 //!
-//! The output above also show the **public key** and the **Account ID**. Those are the independant
+//! The output above also shows the **public key** and the **Account ID**. Those are independent
 //! from the network where you will use the key.
 //!
-//! The **SS58 address** (or **Public Address**) of a new account is a reprensentation of the public
+//! The **SS58 address** (or **Public Address**) of a new account is a representation of the public
 //! keys of an account for a given network (for instance Kusama or Polkadot).
 //!
 //! You can read more about the [SS58 format in the Substrate Docs](https://docs.substrate.io/reference/address-formats/) and see the list of reserved prefixes in the [SS58 Registry](https://github.com/paritytech/ss58-registry).
@@ -110,7 +110,7 @@
 //!
 //! ### Json output
 //!
-//! `subkey` can calso generate the output as *json*. This is useful for automation.
+//! `subkey` can also generate the output as *json*. This is useful for automation.
 //!
 //! command:
 //!
@@ -163,7 +163,7 @@
 //! SS58 Address: 5He5pZpc7AJ8evPuab37vJF6KkFDqq9uDq2WXh877Qw6iaVC
 //! ```
 //!
-//! Using the `inspect` command (see more details below), we see that knowning only the **secret
+//! Using the `inspect` command (see more details below), we see that knowing only the **secret
 //! seed** is no longer sufficient to recover the account:
 //!
 //! ```bash
@@ -184,7 +184,7 @@
 //!
 //! ### Inspecting a key
 //!
-//! If you have *some data* about a key, `subkey inpsect` will help you discover more information
+//! If you have *some data* about a key, `subkey inspect` will help you discover more information
 //! about it.
 //!
 //! If you have **secrets** that you would like to verify for instance, you can use:
diff --git a/substrate/client/basic-authorship/src/basic_authorship.rs b/substrate/client/basic-authorship/src/basic_authorship.rs
index 932287ac865..1519c76c42c 100644
--- a/substrate/client/basic-authorship/src/basic_authorship.rs
+++ b/substrate/client/basic-authorship/src/basic_authorship.rs
@@ -180,7 +180,7 @@ impl ProposerFactory {
 	/// The soft deadline indicates where we should stop attempting to add transactions
 	/// to the block, which exhaust resources. After soft deadline is reached,
 	/// we switch to a fixed-amount mode, in which after we see `MAX_SKIPPED_TRANSACTIONS`
-	/// transactions which exhaust resrouces, we will conclude that the block is full.
+	/// transactions which exhaust resources, we will conclude that the block is full.
/// /// Setting the value too low will significantly limit the amount of transactions /// we try in case they exhaust resources. Setting the value too high can diff --git a/substrate/client/chain-spec/src/chain_spec.rs b/substrate/client/chain-spec/src/chain_spec.rs index 78e81e10d2b..4ec8527de26 100644 --- a/substrate/client/chain-spec/src/chain_spec.rs +++ b/substrate/client/chain-spec/src/chain_spec.rs @@ -52,7 +52,7 @@ enum GenesisSource { File(PathBuf), Binary(Cow<'static, [u8]>), /// factory function + code - //Factory and G type parameter shall be removed togheter with `ChainSpec::from_genesis` + //Factory and G type parameter shall be removed together with `ChainSpec::from_genesis` Factory(Arc G + Send + Sync>, Vec), Storage(Storage), /// build action + code @@ -264,7 +264,7 @@ struct RuntimeInnerWrapper { enum Genesis { /// (Deprecated) Contains the JSON representation of G (the native type representing the /// runtime's `RuntimeGenesisConfig` struct) (will be removed with `ChainSpec::from_genesis`) - /// without the runtime code. It is required to deserialize the legacy chainspecs genereted + /// without the runtime code. It is required to deserialize the legacy chainspecs generated /// with `ChainsSpec::from_genesis` method. Runtime(G), /// (Deprecated) Contains the JSON representation of G (the native type representing the @@ -276,13 +276,13 @@ enum Genesis { Raw(RawGenesis), /// State root hash of the genesis storage. StateRootHash(StorageData), - /// Represents the runtime genesis config in JSON format toghether with runtime code. + /// Represents the runtime genesis config in JSON format together with runtime code. RuntimeGenesis(RuntimeGenesisInner), } /// A configuration of a client. Does not include runtime storage initialization. /// Note: `genesis` field is ignored due to way how the chain specification is serialized into -/// JSON file. Refer to [`ChainSpecJsonContainer`], which flattens [`ClientSpec`] and denies uknown +/// JSON file. Refer to [`ChainSpecJsonContainer`], which flattens [`ClientSpec`] and denies unknown /// fields. #[derive(Serialize, Deserialize, Clone, Debug)] #[serde(rename_all = "camelCase")] @@ -508,7 +508,7 @@ impl ChainSpec { self.client_spec.fork_id.as_deref() } - /// Additional loosly-typed properties of the chain. + /// Additional loosely-typed properties of the chain. /// /// Returns an empty JSON object if 'properties' not defined in config pub fn properties(&self) -> Properties { diff --git a/substrate/client/chain-spec/src/extension.rs b/substrate/client/chain-spec/src/extension.rs index f2939741535..d1f03539349 100644 --- a/substrate/client/chain-spec/src/extension.rs +++ b/substrate/client/chain-spec/src/extension.rs @@ -284,7 +284,7 @@ where } } -/// A subset of the `Extension` trait that only allows for quering extensions. +/// A subset of the `Extension` trait that only allows for querying extensions. pub trait GetExtension { /// Get an extension of specific type. fn get_any(&self, t: TypeId) -> &dyn Any; diff --git a/substrate/client/chain-spec/src/genesis_config_builder.rs b/substrate/client/chain-spec/src/genesis_config_builder.rs index c8b54f66be6..61f065e213c 100644 --- a/substrate/client/chain-spec/src/genesis_config_builder.rs +++ b/substrate/client/chain-spec/src/genesis_config_builder.rs @@ -33,7 +33,7 @@ use std::borrow::Cow; /// A utility that facilitates calling the GenesisBuilder API from the runtime wasm code blob. 
/// /// `EHF` type allows to specify the extended host function required for building runtime's genesis -/// config. The type will be compbined with default `sp_io::SubstrateHostFunctions`. +/// config. The type will be combined with default `sp_io::SubstrateHostFunctions`. pub struct GenesisConfigBuilderRuntimeCaller<'a, EHF = ()> where EHF: HostFunctions, diff --git a/substrate/client/chain-spec/src/lib.rs b/substrate/client/chain-spec/src/lib.rs index eab5f789f29..e8b87a60404 100644 --- a/substrate/client/chain-spec/src/lib.rs +++ b/substrate/client/chain-spec/src/lib.rs @@ -393,7 +393,7 @@ pub trait ChainSpec: BuildStorage + Send + Sync { fn protocol_id(&self) -> Option<&str>; /// Optional network fork identifier. `None` by default. fn fork_id(&self) -> Option<&str>; - /// Additional loosly-typed properties of the chain. + /// Additional loosely-typed properties of the chain. /// /// Returns an empty JSON object if 'properties' not defined in config fn properties(&self) -> Properties; diff --git a/substrate/client/cli/src/commands/vanity.rs b/substrate/client/cli/src/commands/vanity.rs index ce751613298..330a59493ef 100644 --- a/substrate/client/cli/src/commands/vanity.rs +++ b/substrate/client/cli/src/commands/vanity.rs @@ -51,7 +51,7 @@ pub struct VanityCmd { impl VanityCmd { /// Run the command pub fn run(&self) -> error::Result<()> { - let formated_seed = with_crypto_scheme!( + let formatted_seed = with_crypto_scheme!( self.crypto_scheme.scheme, generate_key( &self.pattern, @@ -62,7 +62,7 @@ impl VanityCmd { with_crypto_scheme!( self.crypto_scheme.scheme, print_from_uri( - &formated_seed, + &formatted_seed, None, self.network_scheme.network, self.output_scheme.output_type, diff --git a/substrate/client/cli/src/params/network_params.rs b/substrate/client/cli/src/params/network_params.rs index 12f19df2a68..94efb428091 100644 --- a/substrate/client/cli/src/params/network_params.rs +++ b/substrate/client/cli/src/params/network_params.rs @@ -310,7 +310,7 @@ mod tests { } #[test] - fn sync_ingores_case() { + fn sync_ignores_case() { let params = Cli::try_parse_from(["", "--sync", "wArP"]).expect("Parses network params"); assert_eq!(SyncMode::Warp, params.network_params.sync); diff --git a/substrate/client/consensus/aura/src/lib.rs b/substrate/client/consensus/aura/src/lib.rs index 1be7be8eeea..e220aaac508 100644 --- a/substrate/client/consensus/aura/src/lib.rs +++ b/substrate/client/consensus/aura/src/lib.rs @@ -82,7 +82,7 @@ pub enum CompatibilityMode { None, /// Call `initialize_block` before doing any runtime calls. /// - /// Previously the node would execute `initialize_block` before fetchting the authorities + /// Previously the node would execute `initialize_block` before fetching the authorities /// from the runtime. This behaviour changed in: /// /// By calling `initialize_block` before fetching the authorities, on a block that diff --git a/substrate/client/consensus/babe/src/authorship.rs b/substrate/client/consensus/babe/src/authorship.rs index 11f5233abc6..57ee706a04f 100644 --- a/substrate/client/consensus/babe/src/authorship.rs +++ b/substrate/client/consensus/babe/src/authorship.rs @@ -59,7 +59,7 @@ pub(super) fn calculate_primary_threshold( assert!(theta > 0.0, "authority with weight 0."); // NOTE: in the equation `p = 1 - (1 - c)^theta` the value of `p` is always - // capped by `c`. For all pratical purposes `c` should always be set to a + // capped by `c`. 
For all practical purposes `c` should always be set to a // value < 0.5, as such in the computations below we should never be near // edge cases like `0.999999`. diff --git a/substrate/client/consensus/babe/src/lib.rs b/substrate/client/consensus/babe/src/lib.rs index ccf72939631..d10bdd8c7e4 100644 --- a/substrate/client/consensus/babe/src/lib.rs +++ b/substrate/client/consensus/babe/src/lib.rs @@ -1418,7 +1418,7 @@ where // Skip babe logic if block already in chain or importing blocks during initial sync, // otherwise the check for epoch changes will error because trying to re-import an - // epoch change or because of missing epoch data in the tree, respectivelly. + // epoch change or because of missing epoch data in the tree, respectively. if info.block_gap.map_or(false, |(s, e)| s <= number && number <= e) || block_status == BlockStatus::InChain { diff --git a/substrate/client/consensus/babe/src/migration.rs b/substrate/client/consensus/babe/src/migration.rs index bec2d0a61f4..5f1ece3ec0e 100644 --- a/substrate/client/consensus/babe/src/migration.rs +++ b/substrate/client/consensus/babe/src/migration.rs @@ -64,7 +64,7 @@ impl EpochT for EpochV0 { // Implement From for Epoch impl EpochV0 { - /// Migrate the sturct to current epoch version. + /// Migrate the struct to current epoch version. pub fn migrate(self, config: &BabeConfiguration) -> Epoch { sp_consensus_babe::Epoch { epoch_index: self.epoch_index, diff --git a/substrate/client/consensus/beefy/README.md b/substrate/client/consensus/beefy/README.md index 13f88303a97..a7956cfcd42 100644 --- a/substrate/client/consensus/beefy/README.md +++ b/substrate/client/consensus/beefy/README.md @@ -297,7 +297,7 @@ periodically on the global topic. Let's now dive into description of the message - Justification is considered worthwhile to gossip when: - It is for a recent (implementation specific) round or the latest mandatory round. - All signatures are valid and there is at least `2/3rd + 1` of them. - - Signatorees are part of the current validator set. + - Signatories are part of the current validator set. - Mandatory justifications should be announced periodically. ## Misbehavior diff --git a/substrate/client/consensus/beefy/src/communication/mod.rs b/substrate/client/consensus/beefy/src/communication/mod.rs index 6fda63688e6..09c540e3b8a 100644 --- a/substrate/client/consensus/beefy/src/communication/mod.rs +++ b/substrate/client/consensus/beefy/src/communication/mod.rs @@ -99,7 +99,7 @@ mod cost { // On-demand request was refused by peer. pub(super) const REFUSAL_RESPONSE: Rep = Rep::new(-100, "BEEFY: Proof request refused"); // On-demand request for a proof that can't be found in the backend. - pub(super) const UNKOWN_PROOF_REQUEST: Rep = Rep::new(-150, "BEEFY: Unknown proof request"); + pub(super) const UNKNOWN_PROOF_REQUEST: Rep = Rep::new(-150, "BEEFY: Unknown proof request"); } // benefit scalars for reporting peers. 
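As context for the `calculate_primary_threshold` hunk above: the comment's claim that `p = 1 - (1 - c)^theta` is always capped by `c` follows because `theta`, an authority's fraction of the total stake, lies in `(0, 1]`, so `(1 - c)^theta >= 1 - c` and hence `p <= c`. A minimal standalone Rust sketch of that bound (illustrative only; the sample `c` value and helper names are assumptions, not code from this patch):

```rust
/// Probability of winning a primary slot, per the patched comment:
/// p = 1 - (1 - c)^theta, where theta is the authority's stake fraction.
fn primary_probability(c: f64, theta: f64) -> f64 {
    1.0 - (1.0 - c).powf(theta)
}

fn main() {
    let c = 0.25; // example BABE `c` parameter, chosen arbitrarily for illustration
    for theta in [0.01, 0.1, 0.5, 1.0] {
        let p = primary_probability(c, theta);
        // For any stake fraction theta in (0, 1], p never exceeds c.
        assert!(p <= c + f64::EPSILON);
        println!("theta = {theta:.2}, p = {p:.6}");
    }
}
```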
diff --git a/substrate/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs b/substrate/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs index d856e9748a1..ce184769fa7 100644 --- a/substrate/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs +++ b/substrate/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs @@ -170,7 +170,7 @@ where .flatten() .and_then(|hash| self.client.justifications(hash).ok().flatten()) .and_then(|justifs| justifs.get(BEEFY_ENGINE_ID).cloned()) - .ok_or_else(|| reputation_changes.push(cost::UNKOWN_PROOF_REQUEST)); + .ok_or_else(|| reputation_changes.push(cost::UNKNOWN_PROOF_REQUEST)); request .pending_response .send(netconfig::OutgoingResponse { diff --git a/substrate/client/consensus/beefy/src/keystore.rs b/substrate/client/consensus/beefy/src/keystore.rs index 2ddc938fbc6..9582c2661c3 100644 --- a/substrate/client/consensus/beefy/src/keystore.rs +++ b/substrate/client/consensus/beefy/src/keystore.rs @@ -404,7 +404,7 @@ pub mod tests { let store: BeefyKeystore = Some(store).into(); - let msg = b"are you involved or commited?"; + let msg = b"are you involved or committed?"; let sig1 = store.sign(&alice, msg).unwrap(); let sig2 = Keyring::::Alice.sign(msg); @@ -440,7 +440,7 @@ pub mod tests { let alice = Keyring::Alice.public(); - let msg = b"are you involved or commited?"; + let msg = b"are you involved or committed?"; let sig = store.sign(&alice, msg).err().unwrap(); let err = Error::Signature(expected_error_message.to_string()); @@ -463,7 +463,7 @@ pub mod tests { let store: BeefyKeystore = None.into(); let alice = Keyring::Alice.public(); - let msg = b"are you involved or commited"; + let msg = b"are you involved or committed"; let sig = store.sign(&alice, msg).err().unwrap(); let err = Error::Keystore("no Keystore".to_string()); @@ -487,7 +487,7 @@ pub mod tests { let alice = Keyring::Alice.public(); // `msg` and `sig` match - let msg = b"are you involved or commited?"; + let msg = b"are you involved or committed?"; let sig = store.sign(&alice, msg).unwrap(); assert!(BeefyKeystore::verify(&alice, &sig, msg)); diff --git a/substrate/client/consensus/beefy/src/worker.rs b/substrate/client/consensus/beefy/src/worker.rs index c8eb19621ba..7a47f286ef7 100644 --- a/substrate/client/consensus/beefy/src/worker.rs +++ b/substrate/client/consensus/beefy/src/worker.rs @@ -76,13 +76,13 @@ pub(crate) enum RoundAction { pub(crate) struct VoterOracle { /// Queue of known sessions. Keeps track of voting rounds (block numbers) within each session. /// - /// There are three voter states coresponding to three queue states: + /// There are three voter states corresponding to three queue states: /// 1. voter uninitialized: queue empty, /// 2. up-to-date - all mandatory blocks leading up to current GRANDPA finalized: queue has ONE /// element, the 'current session' where `mandatory_done == true`, /// 3. lagging behind GRANDPA: queue has [1, N] elements, where all `mandatory_done == false`. - /// In this state, everytime a session gets its mandatory block BEEFY finalized, it's popped - /// off the queue, eventually getting to state `2. up-to-date`. + /// In this state, every time a session gets its mandatory block BEEFY finalized, it's + /// popped off the queue, eventually getting to state `2. up-to-date`. sessions: VecDeque>, /// Min delta in block numbers between two blocks, BEEFY should vote on. 
min_block_delta: u32, diff --git a/substrate/client/consensus/common/Cargo.toml b/substrate/client/consensus/common/Cargo.toml index 7f36dfc09ef..f691e84717d 100644 --- a/substrate/client/consensus/common/Cargo.toml +++ b/substrate/client/consensus/common/Cargo.toml @@ -6,7 +6,7 @@ edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" repository.workspace = true -description = "Collection of common consensus specific imlementations for Substrate (client)" +description = "Collection of common consensus specific implementations for Substrate (client)" readme = "README.md" [lints] diff --git a/substrate/client/consensus/common/src/import_queue.rs b/substrate/client/consensus/common/src/import_queue.rs index 39d5bf8ed35..062e244a912 100644 --- a/substrate/client/consensus/common/src/import_queue.rs +++ b/substrate/client/consensus/common/src/import_queue.rs @@ -133,7 +133,7 @@ pub trait ImportQueue: Send { /// Start asynchronous runner for import queue. /// /// Takes an object implementing [`Link`] which allows the import queue to - /// influece the synchronization process. + /// influence the synchronization process. async fn run(self, link: Box>); } diff --git a/substrate/client/consensus/common/src/import_queue/basic_queue.rs b/substrate/client/consensus/common/src/import_queue/basic_queue.rs index 1cc7ec26fd1..125d4f104c1 100644 --- a/substrate/client/consensus/common/src/import_queue/basic_queue.rs +++ b/substrate/client/consensus/common/src/import_queue/basic_queue.rs @@ -187,7 +187,7 @@ impl ImportQueue for BasicQueue { /// Start asynchronous runner for import queue. /// /// Takes an object implementing [`Link`] which allows the import queue to - /// influece the synchronization process. + /// influence the synchronization process. async fn run(mut self, mut link: Box>) { loop { if let Err(_) = self.result_port.next_action(&mut *link).await { @@ -198,7 +198,7 @@ impl ImportQueue for BasicQueue { } } -/// Messages destinated to the background worker. +/// Messages designated to the background worker. mod worker_messages { use super::*; diff --git a/substrate/client/consensus/epochs/src/lib.rs b/substrate/client/consensus/epochs/src/lib.rs index 29bb18e147c..bbc143b7bd3 100644 --- a/substrate/client/consensus/epochs/src/lib.rs +++ b/substrate/client/consensus/epochs/src/lib.rs @@ -326,7 +326,7 @@ impl AsRef for IncrementedEpoch { /// /// The first epoch, epoch_0, is special cased by saying that it starts at /// slot number of the first block in the chain. When bootstrapping a chain, -/// there can be multiple competing block #1s, so we have to ensure that the overlayed +/// there can be multiple competing block #1s, so we have to ensure that the overlaid /// DAG doesn't get confused. /// /// The first block of every epoch should be producing a descriptor for the next @@ -655,7 +655,7 @@ where /// Revert to a specified block given its `hash` and `number`. /// This removes all the epoch changes information that were announced by - /// all the given block descendents. + /// all the given block descendants. 
pub fn revert>( &mut self, descendent_of_builder: D, diff --git a/substrate/client/consensus/grandpa/src/communication/mod.rs b/substrate/client/consensus/grandpa/src/communication/mod.rs index 5c7e1276297..6e87d6bf9a2 100644 --- a/substrate/client/consensus/grandpa/src/communication/mod.rs +++ b/substrate/client/consensus/grandpa/src/communication/mod.rs @@ -222,13 +222,13 @@ pub(crate) struct NetworkBridge, S: Syncing> { neighbor_sender: periodic::NeighborPacketSender, /// `NeighborPacketWorker` processing packets sent through the `NeighborPacketSender`. - // `NetworkBridge` is required to be cloneable, thus one needs to be able to clone its + // `NetworkBridge` is required to be clonable, thus one needs to be able to clone its // children, thus one has to wrap `neighbor_packet_worker` with an `Arc` `Mutex`. neighbor_packet_worker: Arc>>, /// Receiver side of the peer report stream populated by the gossip validator, forwarded to the /// gossip engine. - // `NetworkBridge` is required to be cloneable, thus one needs to be able to clone its + // `NetworkBridge` is required to be clonable, thus one needs to be able to clone its // children, thus one has to wrap gossip_validator_report_stream with an `Arc` `Mutex`. Given // that it is just an `UnboundedReceiver`, one could also switch to a // multi-producer-*multi*-consumer channel implementation. @@ -766,7 +766,7 @@ impl Sink> for OutgoingMessages { ) .ok_or_else(|| { Error::Signing(format!( - "Failed to sign GRANDPA vote for round {} targetting {:?}", + "Failed to sign GRANDPA vote for round {} targeting {:?}", self.round, target_hash )) })?; diff --git a/substrate/client/consensus/grandpa/src/communication/periodic.rs b/substrate/client/consensus/grandpa/src/communication/periodic.rs index daa75292028..9d0e76b7c80 100644 --- a/substrate/client/consensus/grandpa/src/communication/periodic.rs +++ b/substrate/client/consensus/grandpa/src/communication/periodic.rs @@ -106,7 +106,7 @@ impl Stream for NeighborPacketWorker { // Make sure the underlying task is scheduled for wake-up. // - // Note: In case poll_unpin is called after the resetted delay fires again, this + // Note: In case poll_unpin is called after the reset delay fires again, this // will drop one tick. Deemed as very unlikely and also not critical. while this.delay.poll_unpin(cx).is_ready() {} diff --git a/substrate/client/consensus/grandpa/src/tests.rs b/substrate/client/consensus/grandpa/src/tests.rs index 7e42c2d45c7..14708cc89e8 100644 --- a/substrate/client/consensus/grandpa/src/tests.rs +++ b/substrate/client/consensus/grandpa/src/tests.rs @@ -1805,7 +1805,7 @@ async fn grandpa_environment_checks_if_best_block_is_descendent_of_finality_targ ); // best block is higher than finality target and it's on the same fork, - // the best block passed to the voting rule should not be overriden + // the best block passed to the voting rule should not be overridden select_chain.set_best_chain(client.expect_header(hashof10_a).unwrap()); select_chain.set_finality_target(client.expect_header(hashof5_a).unwrap().hash()); voting_rule.set_expected_best_block(hashof10_a); @@ -1940,7 +1940,7 @@ async fn justification_with_equivocation() { precommits.push(precommit); } - // we create an equivocation for the 67th validator targetting blocks #1 and #2. + // we create an equivocation for the 67th validator targeting blocks #1 and #2. // this should be accounted as "voting for all blocks" and therefore block #3 will // have 67/100 votes, reaching finality threshold. 
{ diff --git a/substrate/client/consensus/grandpa/src/until_imported.rs b/substrate/client/consensus/grandpa/src/until_imported.rs index 14f32ecc883..f3874086b58 100644 --- a/substrate/client/consensus/grandpa/src/until_imported.rs +++ b/substrate/client/consensus/grandpa/src/until_imported.rs @@ -1002,7 +1002,7 @@ mod tests { } #[test] - fn block_global_message_wait_completed_return_none_on_block_number_missmatch() { + fn block_global_message_wait_completed_return_none_on_block_number_mismatch() { let msg_inner = test_catch_up(); let waiting_block_1 = diff --git a/substrate/client/consensus/grandpa/src/voting_rule.rs b/substrate/client/consensus/grandpa/src/voting_rule.rs index e09780739c7..c37596d20f6 100644 --- a/substrate/client/consensus/grandpa/src/voting_rule.rs +++ b/substrate/client/consensus/grandpa/src/voting_rule.rs @@ -196,7 +196,7 @@ where target_header = backend .header(target_hash) .ok()? - .expect("Header known to exist due to the existence of one of its descendents; qed"); + .expect("Header known to exist due to the existence of one of its descendants; qed"); } } diff --git a/substrate/client/consensus/grandpa/src/warp_proof.rs b/substrate/client/consensus/grandpa/src/warp_proof.rs index 29111712ec3..7169a424c14 100644 --- a/substrate/client/consensus/grandpa/src/warp_proof.rs +++ b/substrate/client/consensus/grandpa/src/warp_proof.rs @@ -249,7 +249,7 @@ impl> NetworkProvider: BlockNumberOps, { - /// Create a new istance for a given backend and authority set. + /// Create a new instance for a given backend and authority set. pub fn new( backend: Arc, authority_set: SharedAuthoritySet>, diff --git a/substrate/client/consensus/manual-seal/src/consensus/babe.rs b/substrate/client/consensus/manual-seal/src/consensus/babe.rs index 26fa8145980..bc56ce02271 100644 --- a/substrate/client/consensus/manual-seal/src/consensus/babe.rs +++ b/substrate/client/consensus/manual-seal/src/consensus/babe.rs @@ -82,7 +82,7 @@ pub struct BabeVerifier { } impl BabeVerifier { - /// create a nrew verifier + /// create a new verifier pub fn new(epoch_changes: SharedEpochChanges, client: Arc) -> BabeVerifier { BabeVerifier { epoch_changes, client } } diff --git a/substrate/client/consensus/slots/src/lib.rs b/substrate/client/consensus/slots/src/lib.rs index 12636aae7a4..d9d79200531 100644 --- a/substrate/client/consensus/slots/src/lib.rs +++ b/substrate/client/consensus/slots/src/lib.rs @@ -555,7 +555,7 @@ impl SlotProportion { Self(inner.clamp(0.0, 1.0)) } - /// Returns the inner that is guaranted to be in the range `[0,1]`. + /// Returns the inner that is guaranteed to be in the range `[0,1]`. pub fn get(&self) -> f32 { self.0 } @@ -648,7 +648,7 @@ pub fn proposing_remaining_duration( } /// Calculate a slot duration lenience based on the number of missed slots from current -/// to parent. If the number of skipped slots is greated than 0 this method will apply +/// to parent. If the number of skipped slots is greater than 0 this method will apply /// an exponential backoff of at most `2^7 * slot_duration`, if no slots were skipped /// this method will return `None.` pub fn slot_lenience_exponential( @@ -680,7 +680,7 @@ pub fn slot_lenience_exponential( } /// Calculate a slot duration lenience based on the number of missed slots from current -/// to parent. If the number of skipped slots is greated than 0 this method will apply +/// to parent. 
If the number of skipped slots is greater than 0 this method will apply /// a linear backoff of at most `20 * slot_duration`, if no slots were skipped /// this method will return `None.` pub fn slot_lenience_linear( diff --git a/substrate/client/db/src/upgrade.rs b/substrate/client/db/src/upgrade.rs index f1e503867df..475220d9991 100644 --- a/substrate/client/db/src/upgrade.rs +++ b/substrate/client/db/src/upgrade.rs @@ -79,7 +79,7 @@ impl fmt::Display for UpgradeError { write!(f, "Database version comes from future version of the client: {}", version) }, UpgradeError::DecodingJustificationBlock => { - write!(f, "Decodoning justification block failed") + write!(f, "Decoding justification block failed") }, UpgradeError::Io(err) => write!(f, "Io error: {}", err), } diff --git a/substrate/client/db/src/utils.rs b/substrate/client/db/src/utils.rs index abf9c4629ce..b532e0d4666 100644 --- a/substrate/client/db/src/utils.rs +++ b/substrate/client/db/src/utils.rs @@ -338,7 +338,7 @@ fn open_kvdb_rocksdb( db_config.memory_budget = memory_budget; let db = kvdb_rocksdb::Database::open(&db_config, path)?; - // write database version only after the database is succesfully opened + // write database version only after the database is successfully opened crate::upgrade::update_version(path)?; Ok(sp_database::as_database(db)) } diff --git a/substrate/client/executor/common/src/error.rs b/substrate/client/executor/common/src/error.rs index b7c7e2b7019..9d489eaae42 100644 --- a/substrate/client/executor/common/src/error.rs +++ b/substrate/client/executor/common/src/error.rs @@ -145,7 +145,7 @@ pub enum WasmError { #[error("{0}")] Instantiation(String), - /// Other error happenend. + /// Other error happened. #[error("Other error happened while constructing the runtime: {0}")] Other(String), } diff --git a/substrate/client/executor/wasmtime/src/runtime.rs b/substrate/client/executor/wasmtime/src/runtime.rs index 595cc503272..286d134ecd1 100644 --- a/substrate/client/executor/wasmtime/src/runtime.rs +++ b/substrate/client/executor/wasmtime/src/runtime.rs @@ -462,7 +462,7 @@ pub struct Semantics { pub struct Config { /// The WebAssembly standard requires all imports of an instantiated module to be resolved, /// otherwise, the instantiation fails. If this option is set to `true`, then this behavior is - /// overriden and imports that are requested by the module and not provided by the host + /// overridden and imports that are requested by the module and not provided by the host /// functions will be resolved using stubs. These stubs will trap upon a call. pub allow_missing_func_imports: bool, diff --git a/substrate/client/executor/wasmtime/src/tests.rs b/substrate/client/executor/wasmtime/src/tests.rs index 1c06da1e3c1..f86a4275769 100644 --- a/substrate/client/executor/wasmtime/src/tests.rs +++ b/substrate/client/executor/wasmtime/src/tests.rs @@ -254,7 +254,7 @@ fn test_nan_canonicalization(instantiation_strategy: InstantiationStrategy) { /// A NaN with canonical payload bits. const CANONICAL_NAN_BITS: u32 = 0x7fc00000; - /// A NaN value with an abitrary payload. + /// A NaN value with an arbitrary payload. const ARBITRARY_NAN_BITS: u32 = 0x7f812345; // This test works like this: we essentially do @@ -272,7 +272,7 @@ fn test_nan_canonicalization(instantiation_strategy: InstantiationStrategy) { // However, with the `canonicalize_nans` option turned on above, we expect that the output will // be a canonical NaN. 
	//
-	// We exterpolate the results of this tests so that we assume that all intermediate computations
+	// We extrapolate the results of this test so that we assume that all intermediate computations
	// that involve floats are sanitized and cannot produce a non-deterministic NaN.

	let params = (u32::to_le_bytes(ARBITRARY_NAN_BITS), u32::to_le_bytes(1)).encode();
diff --git a/substrate/client/network/bitswap/src/lib.rs b/substrate/client/network/bitswap/src/lib.rs
index 0586354d6a0..1ba95e30bad 100644
--- a/substrate/client/network/bitswap/src/lib.rs
+++ b/substrate/client/network/bitswap/src/lib.rs
@@ -301,7 +301,7 @@ mod tests {
 	use substrate_test_runtime_client::{self, prelude::*, TestClientBuilder};
 
 	#[tokio::test]
-	async fn undecodeable_message() {
+	async fn undecodable_message() {
 		let client = substrate_test_runtime_client::new();
 		let (bitswap, config) = BitswapRequestHandler::new(Arc::new(client));
diff --git a/substrate/client/network/src/protocol/notifications/behaviour.rs b/substrate/client/network/src/protocol/notifications/behaviour.rs
index 9ad41e376e8..b945d4bfc60 100644
--- a/substrate/client/network/src/protocol/notifications/behaviour.rs
+++ b/substrate/client/network/src/protocol/notifications/behaviour.rs
@@ -2668,7 +2668,7 @@ mod tests {
 		//
 		// there is not straight-forward way of adding backoff to `PeerState::Disabled`
 		// so manually adjust the value in order to progress on to the next stage.
-		// This modification together with `ConnectionClosed` will conver the peer
+		// This modification together with `ConnectionClosed` will convert the peer
 		// state into `PeerState::Backoff`.
 		if let Some(PeerState::Disabled { ref mut backoff_until, .. }) =
 			notif.peers.get_mut(&(peer, set_id))
diff --git a/substrate/client/network/src/protocol/notifications/handler.rs b/substrate/client/network/src/protocol/notifications/handler.rs
index 28662be29fe..391252c3ffe 100644
--- a/substrate/client/network/src/protocol/notifications/handler.rs
+++ b/substrate/client/network/src/protocol/notifications/handler.rs
@@ -211,7 +211,7 @@ enum State {
 	/// consequently trying to open the various notifications substreams.
 	///
 	/// A [`NotifsHandlerOut::OpenResultOk`] or a [`NotifsHandlerOut::OpenResultErr`] event must
-	/// be emitted when transitionning to respectively [`State::Open`] or [`State::Closed`].
+	/// be emitted when transitioning to respectively [`State::Open`] or [`State::Closed`].
 	Opening {
 		/// Substream opened by the remote. If `Some`, has been accepted.
 		in_substream: Option>,
diff --git a/substrate/client/network/src/protocol/notifications/service/mod.rs b/substrate/client/network/src/protocol/notifications/service/mod.rs
index 62e6d88a3d5..6d9873f45d5 100644
--- a/substrate/client/network/src/protocol/notifications/service/mod.rs
+++ b/substrate/client/network/src/protocol/notifications/service/mod.rs
@@ -54,7 +54,7 @@ const COMMAND_QUEUE_SIZE: usize = 64;
 /// Type representing subscribers of a notification protocol.
 type Subscribers = Arc>>>;
 
-/// Type represending a distributable message sink.
+/// Type representing a distributable message sink.
 /// Detached message sink must carry the protocol name for registering metrics.
 ///
 /// See documentation for [`PeerContext`] for more details.
@@ -175,11 +175,11 @@ pub enum NotificationCommand {
 /// and an additional, distributable `NotificationsSink` which the protocol may acquire
 /// if it wishes to send notifications through `NotificationsSink` directly.
 ///
-/// The distributable `NoticationsSink` is wrapped in an `Arc>` to allow
+/// The distributable `NotificationsSink` is wrapped in an `Arc>` to allow
 /// `NotificationsService` to swap the underlying sink in case it's replaced.
 #[derive(Debug, Clone)]
 struct PeerContext {
-	/// Sink for sending notificaitons.
+	/// Sink for sending notifications.
 	sink: NotificationsSink,
 
 	/// Distributable notification sink.
diff --git a/substrate/client/network/src/protocol/notifications/service/tests.rs b/substrate/client/network/src/protocol/notifications/service/tests.rs
index 02ba9e1711c..238e0ccf566 100644
--- a/substrate/client/network/src/protocol/notifications/service/tests.rs
+++ b/substrate/client/network/src/protocol/notifications/service/tests.rs
@@ -437,7 +437,7 @@ async fn peer_disconnects_then_async_notification_is_sent() {
 		notif.send_async_notification(&peer_id, vec![1, 3, 3, 7]).await
 	{
 	} else {
-		panic!("invalid state after calling `send_async_notificatio()` on closed connection")
+		panic!("invalid state after calling `send_async_notification()` on closed connection")
 	}
 }
 
diff --git a/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs b/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs
index 4e1c033f33b..85209a888cd 100644
--- a/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs
+++ b/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs
@@ -188,7 +188,7 @@ where
 	}
 }
 
-/// Yielded by the [`NotificationsIn`] after a successfuly upgrade.
+/// Yielded by the [`NotificationsIn`] after a successful upgrade.
 pub struct NotificationsInOpen {
 	/// Handshake sent by the remote.
 	pub handshake: Vec,
@@ -415,7 +415,7 @@ where
 	}
 }
 
-/// Yielded by the [`NotificationsOut`] after a successfuly upgrade.
+/// Yielded by the [`NotificationsOut`] after a successful upgrade.
 pub struct NotificationsOutOpen {
 	/// Handshake returned by the remote.
 	pub handshake: Vec,
diff --git a/substrate/client/network/src/protocol_controller.rs b/substrate/client/network/src/protocol_controller.rs
index 4c8f119baa2..7f851fd8e9c 100644
--- a/substrate/client/network/src/protocol_controller.rs
+++ b/substrate/client/network/src/protocol_controller.rs
@@ -448,7 +448,7 @@ impl ProtocolController {
 		self.peer_store.report_disconnect(peer_id);
 	}
 
-	/// Ask `Peerset` if the peer has a reputation value not sufficent for connection with it.
+	/// Ask `Peerset` if the peer has a reputation value not sufficient for connection with it.
 	fn is_banned(&self, peer_id: &PeerId) -> bool {
 		self.peer_store.is_banned(peer_id)
 	}
@@ -2020,7 +2020,7 @@ mod tests {
 			ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
 		assert!(matches!(controller.reserved_nodes.get(&reserved1), Some(PeerState::NotConnected)));
 
-		// Initiate connectios
+		// Initiate connections
 		controller.alloc_slots();
 		assert!(matches!(controller.reserved_nodes.get(&reserved1), Some(PeerState::NotConnected)));
 		assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty);
diff --git a/substrate/client/network/sync/src/engine.rs b/substrate/client/network/sync/src/engine.rs
index 24640fdc455..ff40ae95624 100644
--- a/substrate/client/network/sync/src/engine.rs
+++ b/substrate/client/network/sync/src/engine.rs
@@ -110,7 +110,7 @@ const INACTIVITY_EVICT_THRESHOLD: Duration = Duration::from_secs(30);
 
 /// Parachain collator may incorrectly get evicted because it's waiting to receive a number of
 /// relaychain blocks before it can start creating parachain blocks.
During this wait, /// `SyncingEngine` still counts it as active and as the peer is not sending blocks, it may get -/// evicted if a block is not received within the first 30 secons since the peer connected. +/// evicted if a block is not received within the first 30 seconds since the peer connected. /// /// To prevent this from happening, define a threshold for how long `SyncingEngine` should wait /// before it starts evicting peers. @@ -424,7 +424,7 @@ where .expect("Genesis block exists; qed"), ); - // Split warp sync params into warp sync config and a channel to retreive target block + // Split warp sync params into warp sync config and a channel to retrieve target block // header. let (warp_sync_config, warp_sync_target_block_header_rx) = warp_sync_params.map_or((None, None), |params| { @@ -1057,7 +1057,7 @@ where // still be under validation. If the peer has different genesis than the // local node the validation fails but the peer cannot be reported in // `validate_connection()` as that is also called by - // `ValiateInboundSubstream` which means that the peer is still being + // `ValidateInboundSubstream` which means that the peer is still being // validated and banning the peer when handling that event would // result in peer getting dropped twice. // diff --git a/substrate/client/network/sync/src/strategy.rs b/substrate/client/network/sync/src/strategy.rs index dabcf37ae63..610fd7c6560 100644 --- a/substrate/client/network/sync/src/strategy.rs +++ b/substrate/client/network/sync/src/strategy.rs @@ -185,7 +185,7 @@ where + Sync + 'static, { - /// Initialize a new syncing startegy. + /// Initialize a new syncing strategy. pub fn new( config: SyncingConfig, client: Arc, @@ -418,7 +418,7 @@ where self.state.is_some() || match self.chain_sync { Some(ref s) => s.status().state.is_major_syncing(), - None => unreachable!("At least one syncing startegy is active; qed"), + None => unreachable!("At least one syncing strategy is active; qed"), } } @@ -429,7 +429,7 @@ where /// Returns the current sync status. pub fn status(&self) -> SyncStatus { - // This function presumes that startegies are executed serially and must be refactored + // This function presumes that strategies are executed serially and must be refactored // once we have parallel strategies. if let Some(ref warp) = self.warp { warp.status() @@ -438,7 +438,7 @@ where } else if let Some(ref chain_sync) = self.chain_sync { chain_sync.status() } else { - unreachable!("At least one syncing startegy is always active; qed") + unreachable!("At least one syncing strategy is always active; qed") } } @@ -518,7 +518,7 @@ where /// Proceed with the next strategy if the active one finished. pub fn proceed_to_next(&mut self) -> Result<(), ClientError> { - // The strategies are switched as `WarpSync` -> `StateStartegy` -> `ChainSync`. + // The strategies are switched as `WarpSync` -> `StateStrategy` -> `ChainSync`. if let Some(ref mut warp) = self.warp { match warp.take_result() { Some(res) => { @@ -569,7 +569,7 @@ where }, } } else if let Some(state) = &self.state { - if state.is_succeded() { + if state.is_succeeded() { info!(target: LOG_TARGET, "State sync is complete, continuing with block sync."); } else { error!(target: LOG_TARGET, "State sync failed. 
Falling back to full sync."); diff --git a/substrate/client/network/sync/src/strategy/chain_sync.rs b/substrate/client/network/sync/src/strategy/chain_sync.rs index ad0c75363e7..da04bbbeccc 100644 --- a/substrate/client/network/sync/src/strategy/chain_sync.rs +++ b/substrate/client/network/sync/src/strategy/chain_sync.rs @@ -117,7 +117,7 @@ mod rep { /// Reputation change for peers which send us a block with bad justifications. pub const BAD_JUSTIFICATION: Rep = Rep::new(-(1 << 16), "Bad justification"); - /// Reputation change when a peer sent us invlid ancestry result. + /// Reputation change when a peer sent us invalid ancestry result. pub const UNKNOWN_ANCESTOR: Rep = Rep::new(-(1 << 16), "DB Error"); /// Peer response data does not have requested bits. @@ -1334,7 +1334,7 @@ where PeerSyncState::DownloadingJustification(_) => { // Peers that were downloading justifications // should be kept in that state. - // We make sure our commmon number is at least something we have. + // We make sure our common number is at least something we have. trace!( target: LOG_TARGET, "Keeping peer {} after restart, updating common number from={} => to={} (our best).", diff --git a/substrate/client/network/sync/src/strategy/chain_sync/test.rs b/substrate/client/network/sync/src/strategy/chain_sync/test.rs index 127b6862f0e..cd955113542 100644 --- a/substrate/client/network/sync/src/strategy/chain_sync/test.rs +++ b/substrate/client/network/sync/src/strategy/chain_sync/test.rs @@ -189,7 +189,7 @@ fn restart_doesnt_affect_peers_downloading_finality_data() { assert_eq!(sync.peers.get(&peer_id3).unwrap().common_number, 50); } -/// Send a block annoucnement for the given `header`. +/// Send a block announcement for the given `header`. fn send_block_announce(header: Header, peer_id: PeerId, sync: &mut ChainSync) { let announce = BlockAnnounce { header: header.clone(), @@ -278,7 +278,7 @@ fn unwrap_from_block_number(from: FromBlock) -> u64 { /// announcement from this node in its sync process. Meaning our common number didn't change. It /// is now expected that we start an ancestor search to find the common number. 
#[test]
-fn do_ancestor_search_when_common_block_to_best_qeued_gap_is_to_big() {
+fn do_ancestor_search_when_common_block_to_best_queued_gap_is_to_big() {
	sp_tracing::try_init_simple();

	let blocks = {
@@ -472,7 +472,7 @@ fn can_sync_huge_fork() {
 		let actions = sync.take_actions().collect::>();
 
 		request = if actions.is_empty() {
-			// We found the ancenstor
+			// We found the ancestor
 			break
 		} else {
 			assert_eq!(actions.len(), 1);
@@ -607,7 +607,7 @@ fn syncs_fork_without_duplicate_requests() {
 		let actions = sync.take_actions().collect::>();
 
 		request = if actions.is_empty() {
-			// We found the ancenstor
+			// We found the ancestor
 			break
 		} else {
 			assert_eq!(actions.len(), 1);
diff --git a/substrate/client/network/sync/src/strategy/state.rs b/substrate/client/network/sync/src/strategy/state.rs
index 12d36ff9e01..6d3b215f7f3 100644
--- a/substrate/client/network/sync/src/strategy/state.rs
+++ b/substrate/client/network/sync/src/strategy/state.rs
@@ -79,7 +79,7 @@ pub struct StateStrategy {
 	state_sync: Box>,
 	peers: HashMap>,
 	actions: Vec>,
-	succeded: bool,
+	succeeded: bool,
 }
 
 impl StateStrategy {
@@ -110,7 +110,7 @@ impl StateStrategy {
 			)),
 			peers,
 			actions: Vec::new(),
-			succeded: false,
+			succeeded: false,
 		}
 	}
 
@@ -129,7 +129,7 @@ impl StateStrategy {
 			})
 			.collect(),
 			actions: Vec::new(),
-			succeded: false,
+			succeeded: false,
 		}
 	}
 
@@ -260,7 +260,7 @@ impl StateStrategy {
 				"Failed to import target block with state: {e:?}."
 			);
 		});
-		self.succeded |= results.into_iter().any(|result| result.is_ok());
+		self.succeeded |= results.into_iter().any(|result| result.is_ok());
 		self.actions.push(StateStrategyAction::Finished);
 	}
 }
@@ -342,10 +342,10 @@ impl StateStrategy {
 		std::mem::take(&mut self.actions).into_iter()
 	}
 
-	/// Check if state sync has succeded.
+	/// Check if state sync has succeeded.
 	#[must_use]
-	pub fn is_succeded(&self) -> bool {
-		self.succeded
+	pub fn is_succeeded(&self) -> bool {
+		self.succeeded
 	}
 }
 
@@ -669,7 +669,7 @@ mod test {
 	}
 
 	#[test]
-	fn succesfully_importing_target_block_finishes_strategy() {
+	fn successfully_importing_target_block_finishes_strategy() {
 		let target_hash = Hash::random();
 		let mut state_sync_provider = MockStateSync::::new();
 		state_sync_provider.expect_target_hash().return_const(target_hash);
diff --git a/substrate/client/network/sync/src/strategy/warp.rs b/substrate/client/network/sync/src/strategy/warp.rs
index 7935b5f29b6..c7b79228efe 100644
--- a/substrate/client/network/sync/src/strategy/warp.rs
+++ b/substrate/client/network/sync/src/strategy/warp.rs
@@ -968,7 +968,7 @@ mod test {
 
 		warp_sync.on_warp_proof_response(&request_peer_id, EncodedProof(Vec::new()));
 
-		// We only interested in alredy generated actions, not new requests.
+		// We are only interested in already generated actions, not new requests.
 		let actions = std::mem::take(&mut warp_sync.actions);
 		assert_eq!(actions.len(), 1);
 		assert!(matches!(
diff --git a/substrate/client/network/sync/src/warp_request_handler.rs b/substrate/client/network/sync/src/warp_request_handler.rs
index 39cf1c5d806..eda67cac95f 100644
--- a/substrate/client/network/sync/src/warp_request_handler.rs
+++ b/substrate/client/network/sync/src/warp_request_handler.rs
@@ -57,7 +57,7 @@ pub fn generate_request_response_config>(
 	}
 }
 
-/// Generate the grandpa warp sync protocol name from the genesi hash and fork id.
+/// Generate the grandpa warp sync protocol name from the genesis hash and fork id.
fn generate_protocol_name>(genesis_hash: Hash, fork_id: Option<&str>) -> String { let genesis_hash = genesis_hash.as_ref(); if let Some(fork_id) = fork_id { diff --git a/substrate/client/network/test/src/sync.rs b/substrate/client/network/test/src/sync.rs index c025a8262f0..f1c1b741430 100644 --- a/substrate/client/network/test/src/sync.rs +++ b/substrate/client/network/test/src/sync.rs @@ -749,7 +749,7 @@ async fn sync_blocks_when_block_announce_validator_says_it_is_new_best() { } } -/// Waits for some time until the validation is successfull. +/// Waits for some time until the validation is successful. struct DeferredBlockAnnounceValidator; impl BlockAnnounceValidator for DeferredBlockAnnounceValidator { diff --git a/substrate/client/offchain/src/api/http.rs b/substrate/client/offchain/src/api/http.rs index 7ca5e3fd13a..46f573341c5 100644 --- a/substrate/client/offchain/src/api/http.rs +++ b/substrate/client/offchain/src/api/http.rs @@ -604,7 +604,7 @@ enum WorkerToApi { /// because we don't want the `HttpApi` to have to drive the reading. /// Instead, reading an item from the channel will notify the worker task, which will push /// the next item. - /// Can also be used to send an error, in case an error happend on the HTTP socket. After + /// Can also be used to send an error, in case an error happened on the HTTP socket. After /// an error is sent, the channel will close. body: mpsc::Receiver>, }, diff --git a/substrate/client/rpc-servers/src/middleware/metrics.rs b/substrate/client/rpc-servers/src/middleware/metrics.rs index 17849dc0c44..688c3c2a9fc 100644 --- a/substrate/client/rpc-servers/src/middleware/metrics.rs +++ b/substrate/client/rpc-servers/src/middleware/metrics.rs @@ -182,7 +182,7 @@ impl RpcMetrics { transport_label, req.method_name(), // the label "is_error", so `success` should be regarded as false - // and vice-versa to be registrered correctly. + // and vice-versa to be registered correctly. if rp.is_success() { "false" } else { "true" }, if is_rate_limited { "true" } else { "false" }, ]) diff --git a/substrate/client/rpc-spec-v2/src/archive/tests.rs b/substrate/client/rpc-spec-v2/src/archive/tests.rs index 1803ffa3a31..de71ed82a12 100644 --- a/substrate/client/rpc-spec-v2/src/archive/tests.rs +++ b/substrate/client/rpc-spec-v2/src/archive/tests.rs @@ -435,7 +435,7 @@ async fn archive_storage_closest_merkle_value() { /// The core of this test. /// - /// Checks keys that are exact match, keys with descedant and keys that should not return + /// Checks keys that are exact match, keys with descendant and keys that should not return /// values. /// /// Returns (key, merkle value) pairs. @@ -459,7 +459,7 @@ async fn archive_storage_closest_merkle_value() { query_type: StorageQueryType::ClosestDescendantMerkleValue, pagination_start_key: None, }, - // Key with descedent. + // Key with descendant. PaginatedStorageQuery { key: hex_string(b":A"), query_type: StorageQueryType::ClosestDescendantMerkleValue, diff --git a/substrate/client/rpc-spec-v2/src/chain_head/event.rs b/substrate/client/rpc-spec-v2/src/chain_head/event.rs index e0c804d16eb..bd986306091 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/event.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/event.rs @@ -111,7 +111,7 @@ impl From for RuntimeEvent { #[derive(Debug, Clone, PartialEq, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Initialized { - /// The hash of the lastest finalized blocks. + /// The hashes of the latest finalized blocks.
pub finalized_block_hashes: Vec, /// The runtime version of the finalized block. /// @@ -315,7 +315,7 @@ pub enum FollowEvent { Stop, } -/// The method respose of `chainHead_body`, `chainHead_call` and `chainHead_storage`. +/// The method response of `chainHead_body`, `chainHead_call` and `chainHead_storage`. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] #[serde(tag = "result")] diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index 4d9dfb24e0a..30152efb5b6 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -2786,7 +2786,7 @@ async fn ensure_operation_limits_works() { FollowEvent::OperationStorageDone(done) if done.operation_id == operation_id ); - // The storage is finished and capactiy must be released. + // The storage is finished and capacity must be released. let alice_id = AccountKeyring::Alice.to_account_id(); // Hex encoded scale encoded bytes representing the call parameters. let call_parameters = hex_string(&alice_id.encode()); @@ -3113,7 +3113,7 @@ async fn storage_closest_merkle_value() { /// The core of this test. /// - /// Checks keys that are exact match, keys with descedant and keys that should not return + /// Checks keys that are exact match, keys with descendant and keys that should not return /// values. /// /// Returns (key, merkle value) pairs. @@ -3139,7 +3139,7 @@ async fn storage_closest_merkle_value() { key: hex_string(b":AAAB"), query_type: StorageQueryType::ClosestDescendantMerkleValue }, - // Key with descedent. + // Key with descendant. StorageQuery { key: hex_string(b":A"), query_type: StorageQueryType::ClosestDescendantMerkleValue diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs index 690a1a64d74..77a28968aed 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs @@ -372,7 +372,7 @@ async fn tx_broadcast_resubmits_invalid_tx() { client_mock.trigger_import_stream(block_2_header).await; // Ensure we propagate the temporary ban error to `submit_and_watch`. - // This ensures we'll loop again with the next annmounced block and try to resubmit the + // This ensures we'll loop again with the next announced block and try to resubmit the // transaction. The transaction remains temporarily banned until the pool is maintained. let event = get_next_event!(&mut pool_middleware); assert_matches!(event, MiddlewarePoolEvent::PoolError { transaction, err } if transaction == xt && err.contains("Transaction temporarily Banned")); @@ -429,7 +429,7 @@ async fn tx_broadcast_resubmits_invalid_tx() { } /// This is similar to `tx_broadcast_resubmits_invalid_tx`. -/// However, it forces the tx to be resubmited because of the pool +/// However, it forces the tx to be resubmitted because of the pool /// limits. Which is a different code path than the invalid tx. #[tokio::test] async fn tx_broadcast_resubmits_dropped_tx() { @@ -509,7 +509,7 @@ async fn tx_broadcast_resubmits_dropped_tx() { pool.inner_pool.maintain(event).await; client_mock.trigger_import_stream(block_3_header.clone()).await; - // The first tx is in a finalzied block; the future tx must enter the pool. 
+ // The first tx is in a finalized block; the future tx must enter the pool. let events = get_next_tx_events!(&mut pool_middleware, 3); assert_eq!( events.get(&current_xt).unwrap(), diff --git a/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs b/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs index 92c83826187..6eaf50d6b2e 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs @@ -44,7 +44,7 @@ pub struct TransactionBroadcast { pool: Arc, /// Executor to spawn subscriptions. executor: SubscriptionTaskExecutor, - /// The brodcast operation IDs. + /// The broadcast operation IDs. broadcast_ids: Arc>>, } @@ -200,7 +200,7 @@ where } } -/// Returns the last element of the providided stream, or `None` if the stream is closed. +/// Returns the last element of the provided stream, or `None` if the stream is closed. async fn last_stream_element(stream: &mut S) -> Option where S: Stream + Unpin, diff --git a/substrate/client/rpc/src/statement/mod.rs b/substrate/client/rpc/src/statement/mod.rs index b4f432bbbb0..e99135aec38 100644 --- a/substrate/client/rpc/src/statement/mod.rs +++ b/substrate/client/rpc/src/statement/mod.rs @@ -89,7 +89,7 @@ impl StatementApiServer for StatementStore { fn submit(&self, encoded: Bytes) -> RpcResult<()> { let statement = Decode::decode(&mut &*encoded) - .map_err(|e| Error::StatementStore(format!("Eror decoding statement: {:?}", e)))?; + .map_err(|e| Error::StatementStore(format!("Error decoding statement: {:?}", e)))?; match self.store.submit(statement, StatementSource::Local) { SubmitResult::New(_) | SubmitResult::Known => Ok(()), // `KnownExpired` should not happen. Expired statements submitted with diff --git a/substrate/client/service/src/chain_ops/import_blocks.rs b/substrate/client/service/src/chain_ops/import_blocks.rs index 34f7669d010..661fc09a8f1 100644 --- a/substrate/client/service/src/chain_ops/import_blocks.rs +++ b/substrate/client/service/src/chain_ops/import_blocks.rs @@ -73,7 +73,7 @@ where reader: CodecIoReader, }, Json { - // Nubmer of blocks we have decoded thus far. + // Number of blocks we have decoded thus far. read_block_count: u64, // Stream to the data, used for decoding new blocks. reader: StreamDeserializer<'static, JsonIoRead, SignedBlock>, diff --git a/substrate/client/service/src/config.rs b/substrate/client/service/src/config.rs index 35262ff493b..59e307d7f93 100644 --- a/substrate/client/service/src/config.rs +++ b/substrate/client/service/src/config.rs @@ -236,7 +236,7 @@ impl Configuration { ProtocolId::from(protocol_id_full) } - /// Returns true if the genesis state writting will be skipped while initializing the genesis + /// Returns true if the genesis state writing will be skipped while initializing the genesis /// block. pub fn no_genesis(&self) -> bool { matches!(self.network.sync_mode, SyncMode::LightState { .. } | SyncMode::Warp { ..
}) diff --git a/substrate/client/state-db/src/lib.rs b/substrate/client/state-db/src/lib.rs index 41c231c31aa..6c37c87a611 100644 --- a/substrate/client/state-db/src/lib.rs +++ b/substrate/client/state-db/src/lib.rs @@ -872,7 +872,7 @@ mod tests { fn check_stored_and_requested_mode_compatibility( mode_when_created: Option, mode_when_reopened: Option, - expected_effective_mode_when_reopenned: Result, + expected_effective_mode_when_reopened: Result, ) { let mut db = make_db(&[]); let (state_db_init, state_db) = @@ -883,7 +883,7 @@ mod tests { let state_db_reopen_result = StateDb::::open(db.clone(), mode_when_reopened, false, false); - if let Ok(expected_mode) = expected_effective_mode_when_reopenned { + if let Ok(expected_mode) = expected_effective_mode_when_reopened { let (state_db_init, state_db_reopened) = state_db_reopen_result.unwrap(); db.commit(&state_db_init); assert_eq!(state_db_reopened.pruning_mode(), expected_mode,) diff --git a/substrate/client/state-db/src/noncanonical.rs b/substrate/client/state-db/src/noncanonical.rs index bdbe8318371..4492a7bd077 100644 --- a/substrate/client/state-db/src/noncanonical.rs +++ b/substrate/client/state-db/src/noncanonical.rs @@ -46,26 +46,26 @@ pub struct NonCanonicalOverlay { #[cfg_attr(test, derive(PartialEq, Debug))] struct OverlayLevel { blocks: Vec>, - used_indicies: u64, // Bitmask of available journal indicies. + used_indices: u64, // Bitmask of available journal indices. } impl OverlayLevel { fn push(&mut self, overlay: BlockOverlay) { - self.used_indicies |= 1 << overlay.journal_index; + self.used_indices |= 1 << overlay.journal_index; self.blocks.push(overlay) } fn available_index(&self) -> u64 { - self.used_indicies.trailing_ones() as u64 + self.used_indices.trailing_ones() as u64 } fn remove(&mut self, index: usize) -> BlockOverlay { - self.used_indicies &= !(1 << self.blocks[index].journal_index); + self.used_indices &= !(1 << self.blocks[index].journal_index); self.blocks.remove(index) } fn new() -> OverlayLevel { - OverlayLevel { blocks: Vec::new(), used_indicies: 0 } + OverlayLevel { blocks: Vec::new(), used_indices: 0 } } } diff --git a/substrate/client/telemetry/src/lib.rs b/substrate/client/telemetry/src/lib.rs index 113d8303a20..7e3a4ee8639 100644 --- a/substrate/client/telemetry/src/lib.rs +++ b/substrate/client/telemetry/src/lib.rs @@ -413,7 +413,7 @@ impl Telemetry { .map_err(|_| Error::TelemetryWorkerDropped) } - /// Make a new cloneable handle to this [`Telemetry`]. This is used for reporting telemetries. + /// Make a new clonable handle to this [`Telemetry`]. This is used for reporting telemetries. pub fn handle(&self) -> TelemetryHandle { TelemetryHandle { message_sender: Arc::new(Mutex::new(self.message_sender.clone())), diff --git a/substrate/client/transaction-pool/README.md b/substrate/client/transaction-pool/README.md index b55dc6482d6..7a53727d576 100644 --- a/substrate/client/transaction-pool/README.md +++ b/substrate/client/transaction-pool/README.md @@ -171,7 +171,7 @@ This parameter instructs the pool propagate/gossip a transaction to node peers. By default this should be `true`, however in some cases it might be undesirable to propagate transactions further. Examples might include heavy transactions produced by block authors in offchain workers (DoS) or risking being front -runned by someone else after finding some non trivial solution or equivocation, +run by someone else after finding some non-trivial solution or equivocation, etc.
### 'TransactionSource` diff --git a/substrate/client/transaction-pool/tests/pool.rs b/substrate/client/transaction-pool/tests/pool.rs index 461b9860d41..49bd2203c12 100644 --- a/substrate/client/transaction-pool/tests/pool.rs +++ b/substrate/client/transaction-pool/tests/pool.rs @@ -999,7 +999,7 @@ fn import_notification_to_pool_maintain_works() { .0, ); - // Prepare the extrisic, push it to the pool and check that it was added. + // Prepare the extrinsic, push it to the pool and check that it was added. let xt = uxt(Alice, 0); block_on(pool.submit_one( pool.api().block_id_to_hash(&BlockId::Number(0)).unwrap().unwrap(), diff --git a/substrate/client/utils/src/notification.rs b/substrate/client/utils/src/notification.rs index dabb85d613c..3f606aabe3a 100644 --- a/substrate/client/utils/src/notification.rs +++ b/substrate/client/utils/src/notification.rs @@ -44,7 +44,7 @@ mod tests; /// and identify the mpsc channels. pub trait TracingKeyStr { /// Const `str` representing the "tracing key" used to tag and identify - /// the mpsc channels owned by the object implemeting this trait. + /// the mpsc channels owned by the object implementing this trait. const TRACING_KEY: &'static str; } diff --git a/substrate/frame/alliance/README.md b/substrate/frame/alliance/README.md index 9930008e2d6..16335a98f59 100644 --- a/substrate/frame/alliance/README.md +++ b/substrate/frame/alliance/README.md @@ -58,7 +58,7 @@ to update the Alliance's rule and make announcements. - `add_unscrupulous_items` - Add some items, either accounts or websites, to the list of unscrupulous items. - `remove_unscrupulous_items` - Remove some items from the list of unscrupulous items. -- `abdicate_fellow_status` - Abdicate one's voting rights, demoting themself to Ally. +- `abdicate_fellow_status` - Abdicate one's voting rights, demoting themselves to Ally. 
#### Root Calls diff --git a/substrate/frame/alliance/src/lib.rs b/substrate/frame/alliance/src/lib.rs index d4703db68db..414d550c53a 100644 --- a/substrate/frame/alliance/src/lib.rs +++ b/substrate/frame/alliance/src/lib.rs @@ -505,10 +505,10 @@ pub mod pallet { proposal: Box<>::Proposal>, #[pallet::compact] length_bound: u32, ) -> DispatchResult { - let proposor = ensure_signed(origin)?; - ensure!(Self::has_voting_rights(&proposor), Error::::NoVotingRights); + let proposer = ensure_signed(origin)?; + ensure!(Self::has_voting_rights(&proposer), Error::::NoVotingRights); - T::ProposalProvider::propose_proposal(proposor, threshold, proposal, length_bound)?; + T::ProposalProvider::propose_proposal(proposer, threshold, proposal, length_bound)?; Ok(()) } diff --git a/substrate/frame/alliance/src/tests.rs b/substrate/frame/alliance/src/tests.rs index 710de5a54bc..c65f10228e7 100644 --- a/substrate/frame/alliance/src/tests.rs +++ b/substrate/frame/alliance/src/tests.rs @@ -26,7 +26,7 @@ use crate::mock::*; type AllianceMotionEvent = pallet_collective::Event; fn assert_powerless(user: RuntimeOrigin, user_is_member: bool) { - //vote / veto with a valid propsal + //vote / veto with a valid proposal let cid = test_cid(); let (proposal, _, _) = make_kick_member_proposal(42); diff --git a/substrate/frame/asset-conversion/src/lib.rs b/substrate/frame/asset-conversion/src/lib.rs index c9725f9d39d..0bf73e8809c 100644 --- a/substrate/frame/asset-conversion/src/lib.rs +++ b/substrate/frame/asset-conversion/src/lib.rs @@ -205,7 +205,7 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { - /// A successful call of the `CretaPool` extrinsic will create this event. + /// A successful call of the `CreatePool` extrinsic will create this event. PoolCreated { /// The account that created the pool. creator: T::AccountId, diff --git a/substrate/frame/asset-rate/src/lib.rs b/substrate/frame/asset-rate/src/lib.rs index befabfe54aa..69f8267a4f2 100644 --- a/substrate/frame/asset-rate/src/lib.rs +++ b/substrate/frame/asset-rate/src/lib.rs @@ -112,7 +112,7 @@ pub mod pallet { /// The origin permissioned to remove an existing conversion rate for an asset. type RemoveOrigin: EnsureOrigin; - /// The origin permissioned to update an existiing conversion rate for an asset. + /// The origin permissioned to update an existing conversion rate for an asset. type UpdateOrigin: EnsureOrigin; /// The currency mechanism for this pallet. diff --git a/substrate/frame/assets/src/lib.rs b/substrate/frame/assets/src/lib.rs index 583c75b3827..c5468e4237d 100644 --- a/substrate/frame/assets/src/lib.rs +++ b/substrate/frame/assets/src/lib.rs @@ -183,7 +183,7 @@ pub use weights::WeightInfo; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; const LOG_TARGET: &str = "runtime::assets"; -/// Trait with callbacks that are executed after successfull asset creation or destruction. +/// Trait with callbacks that are executed after successful asset creation or destruction. 
pub trait AssetsCallback { /// Indicates that asset with `id` was successfully created by the `owner` fn created(_id: &AssetId, _owner: &AccountId) -> Result<(), ()> { diff --git a/substrate/frame/assets/src/tests.rs b/substrate/frame/assets/src/tests.rs index e09648a51ec..c7021bcad53 100644 --- a/substrate/frame/assets/src/tests.rs +++ b/substrate/frame/assets/src/tests.rs @@ -1453,7 +1453,7 @@ fn force_asset_status_should_work() { )); assert_eq!(Assets::balance(0, 1), 50); - // account can recieve assets for balance < min_balance + // account can receive assets for balance < min_balance assert_ok!(Assets::transfer(RuntimeOrigin::signed(2), 0, 1, 1)); assert_eq!(Assets::balance(0, 1), 51); diff --git a/substrate/frame/babe/src/benchmarking.rs b/substrate/frame/babe/src/benchmarking.rs index 92f55665913..6b0e31e8471 100644 --- a/substrate/frame/babe/src/benchmarking.rs +++ b/substrate/frame/babe/src/benchmarking.rs @@ -31,7 +31,7 @@ benchmarks! { // NOTE: generated with the test below `test_generate_equivocation_report_blob`. // the output is not deterministic since keys are generated randomly (and therefore // signature content changes). it should not affect the benchmark. - // with the current benchmark setup it is not possible to generate this programatically + // with the current benchmark setup it is not possible to generate this programmatically // from the benchmark setup. const EQUIVOCATION_PROOF_BLOB: [u8; 416] = [ 222, 241, 46, 66, 243, 228, 135, 233, 177, 64, 149, 170, 141, 92, 193, 106, 51, 73, 31, diff --git a/substrate/frame/bags-list/src/list/tests.rs b/substrate/frame/bags-list/src/list/tests.rs index fd4ad8f893a..cd39b083172 100644 --- a/substrate/frame/bags-list/src/list/tests.rs +++ b/substrate/frame/bags-list/src/list/tests.rs @@ -431,7 +431,7 @@ mod list { #[test] fn insert_at_unchecked_at_is_only_node() { // Note that this `insert_at_unchecked` test should fail post checks because node 42 does - // not get re-assigned the correct bagu pper. This is because `insert_at_unchecked` assumes + // not get re-assigned the correct bag upper. This is because `insert_at_unchecked` assumes // both nodes are already in the same bag with the correct bag upper. ExtBuilder::default().build_and_execute_no_post_check(|| { // given diff --git a/substrate/frame/balances/src/lib.rs b/substrate/frame/balances/src/lib.rs index 3fd3102cd35..80278752207 100644 --- a/substrate/frame/balances/src/lib.rs +++ b/substrate/frame/balances/src/lib.rs @@ -677,7 +677,7 @@ pub mod pallet { /// /// This will waive the transaction fee if at least all but 10% of the accounts needed to /// be upgraded. (We let some not have to be upgraded just in order to allow for the - /// possibililty of churn). + /// possibility of churn). #[pallet::call_index(6)] #[pallet::weight(T::WeightInfo::upgrade_accounts(who.len() as u32))] pub fn upgrade_accounts( @@ -905,14 +905,14 @@ pub mod pallet { Self::try_mutate_account(who, |a, _| -> Result { Ok(f(a)) }) } - /// Returns `true` when `who` has some providers or `insecure_zero_ed` feature is disnabled. + /// Returns `true` when `who` has some providers or `insecure_zero_ed` feature is disabled. /// Returns `false` otherwise. #[cfg(not(feature = "insecure_zero_ed"))] fn have_providers_or_no_zero_ed(_: &T::AccountId) -> bool { true } - /// Returns `true` when `who` has some providers or `insecure_zero_ed` feature is disnabled. + /// Returns `true` when `who` has some providers or `insecure_zero_ed` feature is disabled. /// Returns `false` otherwise. 
#[cfg(feature = "insecure_zero_ed")] fn have_providers_or_no_zero_ed(who: &T::AccountId) -> bool { diff --git a/substrate/frame/balances/src/tests/currency_tests.rs b/substrate/frame/balances/src/tests/currency_tests.rs index 46a4c4caefc..bd4ff762c74 100644 --- a/substrate/frame/balances/src/tests/currency_tests.rs +++ b/substrate/frame/balances/src/tests/currency_tests.rs @@ -1024,7 +1024,7 @@ fn slash_consumed_slash_partial_works() { } #[test] -fn slash_on_non_existant_works() { +fn slash_on_non_existent_works() { ExtBuilder::default().existential_deposit(100).build_and_execute_with(|| { // Slash on non-existent account is okay. assert_eq!(Balances::slash(&12345, 1_300), (NegativeImbalance::new(0), 1300)); @@ -1071,7 +1071,7 @@ fn slash_reserved_overslash_does_not_touch_free_balance() { } #[test] -fn slash_reserved_on_non_existant_works() { +fn slash_reserved_on_non_existent_works() { ExtBuilder::default().existential_deposit(100).build_and_execute_with(|| { // Slash on non-existent account is okay. assert_eq!(Balances::slash_reserved(&12345, 1_300), (NegativeImbalance::new(0), 1300)); diff --git a/substrate/frame/balances/src/tests/reentrancy_tests.rs b/substrate/frame/balances/src/tests/reentrancy_tests.rs index 1afbe82c7e2..717f0497857 100644 --- a/substrate/frame/balances/src/tests/reentrancy_tests.rs +++ b/substrate/frame/balances/src/tests/reentrancy_tests.rs @@ -44,7 +44,7 @@ fn transfer_dust_removal_tst1_should_work() { assert_eq!(Balances::free_balance(&2), 0); // As expected beneficiary account 3 - // received the transfered fund. + // received the transferred fund. assert_eq!(Balances::free_balance(&3), 450); // Dust balance is deposited to account 1 @@ -123,7 +123,7 @@ fn repatriating_reserved_balance_dust_removal_should_work() { // Reserve a value on account 2, // Such that free balance is lower than - // Exestintial deposit. + // Existential deposit. assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(2), 1, 450)); // Since free balance of account 2 is lower than diff --git a/substrate/frame/benchmarking/src/analysis.rs b/substrate/frame/benchmarking/src/analysis.rs index 5fc3abb5a27..987078ff79a 100644 --- a/substrate/frame/benchmarking/src/analysis.rs +++ b/substrate/frame/benchmarking/src/analysis.rs @@ -41,7 +41,7 @@ pub enum BenchmarkSelector { /// Multiplies the value by 1000 and converts it into an u128. fn mul_1000_into_u128(value: f64) -> u128 { - // This is slighly more precise than the alternative of `(value * 1000.0) as u128`. + // This is slightly more precise than the alternative of `(value * 1000.0) as u128`. (value as u128) .saturating_mul(1000) .saturating_add((value.fract() * 1000.0) as u128) diff --git a/substrate/frame/benchmarking/src/lib.rs b/substrate/frame/benchmarking/src/lib.rs index f79582d03e5..d4ee0abbecc 100644 --- a/substrate/frame/benchmarking/src/lib.rs +++ b/substrate/frame/benchmarking/src/lib.rs @@ -203,7 +203,7 @@ pub use v1::*; /// ## Where Clause /// /// Some pallets require a where clause specifying constraints on their generics to make -/// writing benchmarks feasible. To accomodate this situation, you can provide such a where +/// writing benchmarks feasible. To accommodate this situation, you can provide such a where /// clause as the (only) argument to the `#[benchmarks]` or `#[instance_benchmarks]` attribute /// macros. Below is an example of this taken from the `message-queue` pallet. 
/// diff --git a/substrate/frame/benchmarking/src/v1.rs b/substrate/frame/benchmarking/src/v1.rs index 4ad8cc0edd4..b2449db3d67 100644 --- a/substrate/frame/benchmarking/src/v1.rs +++ b/substrate/frame/benchmarking/src/v1.rs @@ -874,7 +874,7 @@ macro_rules! impl_bench_name_tests { $crate::BenchmarkError::Override(_) => { // This is still considered a success condition. $crate::__private::log::error!( - "WARNING: benchmark error overrided - {}", + "WARNING: benchmark error overridden - {}", stringify!($name), ); }, @@ -1704,7 +1704,7 @@ macro_rules! impl_test_function { $crate::BenchmarkError::Override(_) => { // This is still considered a success condition. $crate::__private::log::error!( - "WARNING: benchmark error overrided - {}", + "WARNING: benchmark error overridden - {}", $crate::__private::str::from_utf8(benchmark_name) .expect("benchmark name is always a valid string!"), ); @@ -1851,7 +1851,7 @@ macro_rules! add_benchmark { Err($crate::BenchmarkError::Override(mut result)) => { // Insert override warning as the first storage key. $crate::__private::log::error!( - "WARNING: benchmark error overrided - {}", + "WARNING: benchmark error overridden - {}", $crate::__private::str::from_utf8(benchmark) .expect("benchmark name is always a valid string!") ); diff --git a/substrate/frame/broker/src/lib.rs b/substrate/frame/broker/src/lib.rs index a669463aa02..f1b49a73a52 100644 --- a/substrate/frame/broker/src/lib.rs +++ b/substrate/frame/broker/src/lib.rs @@ -683,7 +683,7 @@ pub mod pallet { /// - `origin`: Must be a Signed origin of the account which owns the Region `region_id`. /// - `region_id`: The Region which was assigned to the Pool. /// - `max_timeslices`: The maximum number of timeslices which should be processed. This may - /// effect the weight of the call but should be ideally made equivalant to the length of + /// affect the weight of the call but should ideally be made equivalent to the length of /// the Region `region_id`. If it is less than this, then further dispatches will be /// required with the `region_id` which makes up any remainders of the region to be /// collected. diff --git a/substrate/frame/broker/src/types.rs b/substrate/frame/broker/src/types.rs index 7e9f351723a..e8119d29ef5 100644 --- a/substrate/frame/broker/src/types.rs +++ b/substrate/frame/broker/src/types.rs @@ -55,7 +55,7 @@ pub enum Finality { pub struct RegionId { /// The timeslice at which this Region begins. pub begin: Timeslice, - /// The index of the Polakdot Core on which this Region will be scheduled. + /// The index of the Polkadot Core on which this Region will be scheduled. pub core: CoreIndex, /// The regularity parts in which this Region will be scheduled. pub mask: CoreMask, @@ -198,7 +198,7 @@ pub struct PoolIoRecord { /// The total change of the portion of the pool supplied by purchased Bulk Coretime, measured /// in Core Mask Bits. pub private: SignedCoreMaskBitCount, - /// The total change of the portion of the pool supplied by the Polkaot System, measured in + /// The total change of the portion of the pool supplied by the Polkadot System, measured in /// Core Mask Bits.
pub system: SignedCoreMaskBitCount, } diff --git a/substrate/frame/collective/README.md b/substrate/frame/collective/README.md index 444927e51da..e860edbd484 100644 --- a/substrate/frame/collective/README.md +++ b/substrate/frame/collective/README.md @@ -9,7 +9,7 @@ calculations, but enforces this neither in `set_members` nor in `change_members_ A "prime" member may be set to help determine the default vote behavior based on chain config. If `PrimeDefaultVote` is used, the prime vote acts as the default vote in case of any abstentions after the voting period. If `MoreThanMajorityThenPrimeDefaultVote` is used, then -abstentations will first follow the majority of the collective voting, and then the prime +abstentions will first follow the majority of the collective voting, and then the prime member. Voting happens through motions comprising a proposal (i.e. a dispatchable) plus a diff --git a/substrate/frame/collective/src/lib.rs b/substrate/frame/collective/src/lib.rs index 882e99a6d00..d0009d02f68 100644 --- a/substrate/frame/collective/src/lib.rs +++ b/substrate/frame/collective/src/lib.rs @@ -283,7 +283,7 @@ pub mod pallet { pub type Members, I: 'static = ()> = StorageValue<_, Vec, ValueQuery>; - /// The prime member that helps determine the default vote behavior in case of absentations. + /// The prime member that helps determine the default vote behavior in case of abstentions. #[pallet::storage] pub type Prime, I: 'static = ()> = StorageValue<_, T::AccountId, OptionQuery>; diff --git a/substrate/frame/collective/src/tests.rs b/substrate/frame/collective/src/tests.rs index 8a80dd167e3..5240dc215ff 100644 --- a/substrate/frame/collective/src/tests.rs +++ b/substrate/frame/collective/src/tests.rs @@ -995,7 +995,7 @@ fn motions_all_first_vote_free_works() { Collective::vote(RuntimeOrigin::signed(3), hash, 0, false); assert_eq!(vote_rval.unwrap().pays_fee, Pays::Yes); - // Test close() Extrincis | Check DispatchResultWithPostInfo with Pay Info + // Test close() Extrinsics | Check DispatchResultWithPostInfo with Pay Info let proposal_weight = proposal.get_dispatch_info().weight; let close_rval: DispatchResultWithPostInfo = diff --git a/substrate/frame/contracts/README.md b/substrate/frame/contracts/README.md index 6e817292e66..09dc770300c 100644 --- a/substrate/frame/contracts/README.md +++ b/substrate/frame/contracts/README.md @@ -59,7 +59,7 @@ In general, a contract execution needs to be deterministic so that all nodes com it. To that end we disallow any instructions that could cause indeterminism. Most notable are any floating point arithmetic. That said, sometimes contracts are executed off-chain and hence are not subject to consensus. If code is only executed by a single node and implicitly trusted by other actors is such a case. Trusted execution environments -come to mind. To that end we allow the execution of indeterminstic code for off-chain usages with the following +come to mind. To that end we allow the execution of indeterministic code for off-chain usages with the following constraints: 1. No contract can ever be instantiated from an indeterministic code. 
The only way to execute the code is to use a diff --git a/substrate/frame/contracts/fixtures/contracts/multi_store.rs b/substrate/frame/contracts/fixtures/contracts/multi_store.rs index b83f3995a42..a78115f0148 100644 --- a/substrate/frame/contracts/fixtures/contracts/multi_store.rs +++ b/substrate/frame/contracts/fixtures/contracts/multi_store.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Does two stores to two seperate storage items +//! Does two stores to two separate storage items #![no_std] #![no_main] diff --git a/substrate/frame/contracts/src/benchmarking/code.rs b/substrate/frame/contracts/src/benchmarking/code.rs index 96ce11f8c41..b97cf168e26 100644 --- a/substrate/frame/contracts/src/benchmarking/code.rs +++ b/substrate/frame/contracts/src/benchmarking/code.rs @@ -164,7 +164,7 @@ impl From for WasmModule { // Grant access to linear memory. // Every contract module is required to have an imported memory. - // If no memory is specified in the passed ModuleDefenition, then + // If no memory is specified in the passed ModuleDefinition, then // default to (1, 1). let (init, max) = if let Some(memory) = &def.memory { (memory.min_pages, Some(memory.max_pages)) diff --git a/substrate/frame/contracts/src/benchmarking/mod.rs b/substrate/frame/contracts/src/benchmarking/mod.rs index ce2cf15d812..9fb107537ba 100644 --- a/substrate/frame/contracts/src/benchmarking/mod.rs +++ b/substrate/frame/contracts/src/benchmarking/mod.rs @@ -340,7 +340,7 @@ mod benchmarks { assert_eq!(StorageVersion::get::>(), version); } - // This benchmarks the weight of dispatching migrate to execute 1 `NoopMigraton` + // This benchmarks the weight of dispatching migrate to execute 1 `NoopMigration` #[benchmark(pov_mode = Measured)] fn migrate() { let latest_version = LATEST_MIGRATION_VERSION; @@ -1859,7 +1859,7 @@ mod benchmarks { // We call unique accounts. // - // This is a slow call: We redeuce the number of runs. + // This is a slow call: We reduce the number of runs. #[benchmark(pov_mode = Measured)] fn seal_call(r: Linear<0, { API_BENCHMARK_RUNS / 2 }>) -> Result<(), BenchmarkError> { let dummy_code = WasmModule::::dummy_with_bytes(0); @@ -1937,7 +1937,7 @@ mod benchmarks { Ok(()) } - // This is a slow call: We redeuce the number of runs. + // This is a slow call: We reduce the number of runs. #[benchmark(pov_mode = Measured)] fn seal_delegate_call(r: Linear<0, { API_BENCHMARK_RUNS / 2 }>) -> Result<(), BenchmarkError> { let hashes = (0..r) @@ -2473,7 +2473,7 @@ mod benchmarks { // Only calling the function itself for the list of // generated different ECDSA keys. - // This is a slow call: We redeuce the number of runs. + // This is a slow call: We reduce the number of runs. #[benchmark(pov_mode = Measured)] fn seal_ecdsa_to_eth_address( r: Linear<0, { API_BENCHMARK_RUNS / 10 }>, diff --git a/substrate/frame/contracts/src/exec.rs b/substrate/frame/contracts/src/exec.rs index d24657b65b2..41a0383811f 100644 --- a/substrate/frame/contracts/src/exec.rs +++ b/substrate/frame/contracts/src/exec.rs @@ -531,9 +531,9 @@ enum FrameArgs<'a, T: Config, E> { nonce: u64, /// The executable whose `deploy` function is run. executable: E, - /// A salt used in the contract address deriviation of the new contract. + /// A salt used in the contract address derivation of the new contract. salt: &'a [u8], - /// The input data is used in the contract address deriviation of the new contract. 
+ /// The input data is used in the contract address derivation of the new contract. input_data: &'a [u8], }, } diff --git a/substrate/frame/contracts/src/gas.rs b/substrate/frame/contracts/src/gas.rs index b9d91f38f16..32fad2140f1 100644 --- a/substrate/frame/contracts/src/gas.rs +++ b/substrate/frame/contracts/src/gas.rs @@ -352,7 +352,7 @@ mod tests { assert!(gas_meter.charge(SimpleToken(1)).is_err()); } - // Make sure that the gas meter does not charge in case of overcharger + // Make sure that the gas meter does not charge in case of overcharge #[test] fn overcharge_does_not_charge() { let mut gas_meter = GasMeter::::new(Weight::from_parts(200, 0)); diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs index de84d5220c5..6433d4eecdc 100644 --- a/substrate/frame/contracts/src/lib.rs +++ b/substrate/frame/contracts/src/lib.rs @@ -1104,7 +1104,7 @@ pub mod pallet { /// A more detailed error can be found on the node console if debug messages are enabled /// by supplying `-lruntime::contracts=debug`. CodeRejected, - /// An indetermistic code was used in a context where this is not permitted. + /// An indeterministic code was used in a context where this is not permitted. Indeterministic, /// A pending migration needs to complete before the extrinsic can be called. MigrationInProgress, diff --git a/substrate/frame/contracts/src/migration/v09.rs b/substrate/frame/contracts/src/migration/v09.rs index f19bff9d674..8e718871ecb 100644 --- a/substrate/frame/contracts/src/migration/v09.rs +++ b/substrate/frame/contracts/src/migration/v09.rs @@ -131,7 +131,7 @@ impl MigrationStep for Migration { let module = CodeStorage::::get(&code_hash).unwrap(); ensure!( module.instruction_weights_version == old.instruction_weights_version, - "invalid isntruction weights version" + "invalid instruction weights version" ); ensure!(module.determinism == Determinism::Enforced, "invalid determinism"); ensure!(module.initial == old.initial, "invalid initial"); diff --git a/substrate/frame/contracts/src/schedule.rs b/substrate/frame/contracts/src/schedule.rs index b2e3801deae..06a7c2005aa 100644 --- a/substrate/frame/contracts/src/schedule.rs +++ b/substrate/frame/contracts/src/schedule.rs @@ -193,7 +193,7 @@ pub struct HostFnWeights { /// Weight of calling `seal_set_storage`. pub set_storage: Weight, - /// Weight per written byten of an item stored with `seal_set_storage`. + /// Weight per written byte of an item stored with `seal_set_storage`. pub set_storage_per_new_byte: Weight, /// Weight per overwritten byte of an item stored with `seal_set_storage`. diff --git a/substrate/frame/contracts/src/tests.rs b/substrate/frame/contracts/src/tests.rs index db6b2e80d07..ed486fc4a67 100644 --- a/substrate/frame/contracts/src/tests.rs +++ b/substrate/frame/contracts/src/tests.rs @@ -1347,7 +1347,7 @@ fn transfer_expendable_cannot_kill_account() { } #[test] -fn cannot_self_destruct_through_draning() { +fn cannot_self_destruct_through_draining() { let (wasm, _code_hash) = compile_module::("drain").unwrap(); ExtBuilder::default().existential_deposit(200).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); @@ -1662,7 +1662,7 @@ fn cannot_self_destruct_in_constructor() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - // Fail to instantiate the BOB because the contructor calls seal_terminate. + // Fail to instantiate the BOB because the constructor calls seal_terminate. 
assert_err_ignore_postinfo!( Contracts::instantiate_with_code( RuntimeOrigin::signed(ALICE), @@ -5096,7 +5096,7 @@ fn cannot_instantiate_indeterministic_code() { >::CodeRejected, ); - // Try to upload a non deterministic code as deterministic + // Try to upload a non-deterministic code as deterministic assert_err!( Contracts::upload_code( RuntimeOrigin::signed(ALICE), @@ -5176,7 +5176,7 @@ fn cannot_instantiate_indeterministic_code() { >::Indeterministic, ); - // Instantiations are not allowed even in non determinism mode + // Instantiations are not allowed even in non-determinism mode assert_err!( >::bare_call( ALICE, @@ -5202,7 +5202,7 @@ fn cannot_set_code_indeterministic_code() { ExtBuilder::default().existential_deposit(200).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - // Put the non deterministic contract on-chain + // Put the non-deterministic contract on-chain assert_ok!(Contracts::upload_code( RuntimeOrigin::signed(ALICE), wasm, @@ -5226,7 +5226,7 @@ fn cannot_set_code_indeterministic_code() { .unwrap() .account_id; - // We do not allow to set the code hash to a non determinstic wasm + // We do not allow setting the code hash to a non-deterministic wasm assert_err!( >::bare_call( ALICE, @@ -5252,7 +5252,7 @@ fn delegate_call_indeterministic_code() { ExtBuilder::default().existential_deposit(200).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - // Put the non deterministic contract on-chain + // Put the non-deterministic contract on-chain assert_ok!(Contracts::upload_code( RuntimeOrigin::signed(ALICE), wasm, @@ -5293,7 +5293,7 @@ fn delegate_call_indeterministic_code() { >::Indeterministic, ); - // The delegate call will work on non deterministic mode + // The delegate call will work in non-deterministic mode assert_ok!( >::bare_call( ALICE, @@ -5429,7 +5429,7 @@ fn locking_delegate_dependency_works() { contract.storage_base_deposit() - ED ); - // Removing an unexisting dependency should fail. + // Removing a nonexistent dependency should fail. assert_err!( call(&addr_caller, &unlock_delegate_dependency_input).result, Error::::DelegateDependencyNotFound diff --git a/substrate/frame/contracts/src/wasm/mod.rs b/substrate/frame/contracts/src/wasm/mod.rs index af287a6f2df..e6af62c7289 100644 --- a/substrate/frame/contracts/src/wasm/mod.rs +++ b/substrate/frame/contracts/src/wasm/mod.rs @@ -1425,7 +1425,7 @@ mod tests { #[test] fn contract_ecdsa_to_eth_address() { - /// calls `seal_ecdsa_to_eth_address` for the contstant and ensures the result equals the + /// calls `seal_ecdsa_to_eth_address` for the constant and ensures the result equals the /// expected one.
const CODE_ECDSA_TO_ETH_ADDRESS: &str = r#" (module diff --git a/substrate/frame/contracts/src/wasm/runtime.rs b/substrate/frame/contracts/src/wasm/runtime.rs index 7d43d30ba58..160dfa0d2f3 100644 --- a/substrate/frame/contracts/src/wasm/runtime.rs +++ b/substrate/frame/contracts/src/wasm/runtime.rs @@ -245,7 +245,7 @@ pub enum RuntimeCosts { /// Weight of calling `account_reentrance_count` AccountEntranceCount, /// Weight of calling `instantiation_nonce` - InstantationNonce, + InstantiationNonce, /// Weight of calling `lock_delegate_dependency` LockDelegateDependency, /// Weight of calling `unlock_delegate_dependency` @@ -337,7 +337,7 @@ impl Token for RuntimeCosts { EcdsaToEthAddress => s.ecdsa_to_eth_address, ReentrantCount => s.reentrance_count, AccountEntranceCount => s.account_reentrance_count, - InstantationNonce => s.instantiation_nonce, + InstantiationNonce => s.instantiation_nonce, LockDelegateDependency => s.lock_delegate_dependency, UnlockDelegateDependency => s.unlock_delegate_dependency, } @@ -2298,7 +2298,7 @@ pub mod env { /// Returns a nonce that is unique per contract instantiation. /// See [`pallet_contracts_uapi::HostFn::instantiation_nonce`]. fn instantiation_nonce(ctx: _, _memory: _) -> Result { - ctx.charge_gas(RuntimeCosts::InstantationNonce)?; + ctx.charge_gas(RuntimeCosts::InstantiationNonce)?; Ok(ctx.ext.nonce()) } diff --git a/substrate/frame/conviction-voting/src/tests.rs b/substrate/frame/conviction-voting/src/tests.rs index 5c582a35362..74baeace898 100644 --- a/substrate/frame/conviction-voting/src/tests.rs +++ b/substrate/frame/conviction-voting/src/tests.rs @@ -39,7 +39,7 @@ frame_support::construct_runtime!( } ); -// Test that a fitlered call can be dispatched. +// Test that a filtered call can be dispatched. pub struct BaseFilter; impl Contains for BaseFilter { fn contains(call: &RuntimeCall) -> bool { diff --git a/substrate/frame/core-fellowship/src/tests/integration.rs b/substrate/frame/core-fellowship/src/tests/integration.rs index 3c2dc1de595..d3bbac15805 100644 --- a/substrate/frame/core-fellowship/src/tests/integration.rs +++ b/substrate/frame/core-fellowship/src/tests/integration.rs @@ -251,7 +251,7 @@ fn swap_exhaustive_works() { }); assert_eq!(root_add, root_swap); - // Ensure that we dont compare trivial stuff like `()` from a type error above. + // Ensure that we don't compare trivial stuff like `()` from a type error above. assert_eq!(root_add.len(), 32); }); } diff --git a/substrate/frame/democracy/src/lib.rs b/substrate/frame/democracy/src/lib.rs index 08e2a7599f5..f3d33a72f3a 100644 --- a/substrate/frame/democracy/src/lib.rs +++ b/substrate/frame/democracy/src/lib.rs @@ -484,7 +484,7 @@ pub mod pallet { Blacklisted { proposal_hash: T::Hash }, /// An account has voted in a referendum Voted { voter: T::AccountId, ref_index: ReferendumIndex, vote: AccountVote> }, - /// An account has secconded a proposal + /// An account has seconded a proposal Seconded { seconder: T::AccountId, prop_index: PropIndex }, /// A proposal got canceled. 
ProposalCanceled { prop_index: PropIndex }, diff --git a/substrate/frame/democracy/src/migrations/unlock_and_unreserve_all_funds.rs b/substrate/frame/democracy/src/migrations/unlock_and_unreserve_all_funds.rs index 188c475f64d..1cb50a157b1 100644 --- a/substrate/frame/democracy/src/migrations/unlock_and_unreserve_all_funds.rs +++ b/substrate/frame/democracy/src/migrations/unlock_and_unreserve_all_funds.rs @@ -321,40 +321,40 @@ mod test { } #[test] - fn unreserve_works_for_depositer() { - let depositer_0 = 10; - let depositer_1 = 11; + fn unreserve_works_for_depositor() { + let depositor_0 = 10; + let depositor_1 = 11; let deposit = 25; - let depositer_0_initial_reserved = 0; - let depositer_1_initial_reserved = 15; + let depositor_0_initial_reserved = 0; + let depositor_1_initial_reserved = 15; let initial_balance = 100_000; new_test_ext().execute_with(|| { // Set up initial state. - ::Currency::make_free_balance_be(&depositer_0, initial_balance); - ::Currency::make_free_balance_be(&depositer_1, initial_balance); + ::Currency::make_free_balance_be(&depositor_0, initial_balance); + ::Currency::make_free_balance_be(&depositor_1, initial_balance); assert_ok!(::Currency::reserve( - &depositer_0, - depositer_0_initial_reserved + deposit + &depositor_0, + depositor_0_initial_reserved + deposit )); assert_ok!(::Currency::reserve( - &depositer_1, - depositer_1_initial_reserved + deposit + &depositor_1, + depositor_1_initial_reserved + deposit )); let depositors = BoundedVec::<_, ::MaxDeposits>::truncate_from(vec![ - depositer_0, - depositer_1, + depositor_0, + depositor_1, ]); DepositOf::::insert(0, (depositors, deposit)); // Sanity check: ensure initial reserved balance was set correctly. assert_eq!( - ::Currency::reserved_balance(&depositer_0), - depositer_0_initial_reserved + deposit + ::Currency::reserved_balance(&depositor_0), + depositor_0_initial_reserved + deposit ); assert_eq!( - ::Currency::reserved_balance(&depositer_1), - depositer_1_initial_reserved + deposit + ::Currency::reserved_balance(&depositor_1), + depositor_1_initial_reserved + deposit ); // Run the migration. @@ -365,12 +365,12 @@ mod test { // Assert the reserved balance was reduced by the expected amount. assert_eq!( - ::Currency::reserved_balance(&depositer_0), - depositer_0_initial_reserved + ::Currency::reserved_balance(&depositor_0), + depositor_0_initial_reserved ); assert_eq!( - ::Currency::reserved_balance(&depositer_1), - depositer_1_initial_reserved + ::Currency::reserved_balance(&depositor_1), + depositor_1_initial_reserved ); }); } diff --git a/substrate/frame/democracy/src/tests.rs b/substrate/frame/democracy/src/tests.rs index dd69f48dbc1..e2946ba9815 100644 --- a/substrate/frame/democracy/src/tests.rs +++ b/substrate/frame/democracy/src/tests.rs @@ -62,7 +62,7 @@ frame_support::construct_runtime!( } ); -// Test that a fitlered call can be dispatched. +// Test that a filtered call can be dispatched. pub struct BaseFilter; impl Contains for BaseFilter { fn contains(call: &RuntimeCall) -> bool { diff --git a/substrate/frame/democracy/src/tests/cancellation.rs b/substrate/frame/democracy/src/tests/cancellation.rs index 4384fe6a164..b4c42f9c790 100644 --- a/substrate/frame/democracy/src/tests/cancellation.rs +++ b/substrate/frame/democracy/src/tests/cancellation.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! The tests for cancelation functionality. +//! The tests for cancellation functionality. 
use super::*; diff --git a/substrate/frame/election-provider-multi-phase/src/helpers.rs b/substrate/frame/election-provider-multi-phase/src/helpers.rs index 57d580e9301..a3f27fc18f0 100644 --- a/substrate/frame/election-provider-multi-phase/src/helpers.rs +++ b/substrate/frame/election-provider-multi-phase/src/helpers.rs @@ -160,7 +160,7 @@ pub fn target_index_fn_linear( } /// Create a function that can map a voter index ([`SolutionVoterIndexOf`]) to the actual voter -/// account using a linearly indexible snapshot. +/// account using a linearly indexable snapshot. pub fn voter_at_fn( snapshot: &Vec>, ) -> impl Fn(SolutionVoterIndexOf) -> Option + '_ { @@ -172,7 +172,7 @@ pub fn voter_at_fn( } /// Create a function that can map a target index ([`SolutionTargetIndexOf`]) to the actual target -/// account using a linearly indexible snapshot. +/// account using a linearly indexable snapshot. pub fn target_at_fn( snapshot: &Vec, ) -> impl Fn(SolutionTargetIndexOf) -> Option + '_ { diff --git a/substrate/frame/election-provider-multi-phase/src/lib.rs b/substrate/frame/election-provider-multi-phase/src/lib.rs index 6bf4dfe4f1e..31a79577d1f 100644 --- a/substrate/frame/election-provider-multi-phase/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/src/lib.rs @@ -1134,7 +1134,7 @@ pub mod pallet { /// A solution was stored with the given compute. /// /// The `origin` indicates the origin of the solution. If `origin` is `Some(AccountId)`, - /// the stored solution was submited in the signed phase by a miner with the `AccountId`. + /// the stored solution was submitted in the signed phase by a miner with the `AccountId`. /// Otherwise, the solution was stored either during the unsigned phase or by /// `T::ForceOrigin`. The `bool` is `true` when a previous solution was ejected to make /// room for this one. @@ -1192,7 +1192,7 @@ pub mod pallet { BoundNotMet, /// Submitted solution has too many winners TooManyWinners, - /// Sumission was prepared for a different round. + /// Submission was prepared for a different round. PreDispatchDifferentRound, } diff --git a/substrate/frame/election-provider-multi-phase/src/unsigned.rs b/substrate/frame/election-provider-multi-phase/src/unsigned.rs index 94348181334..8b25815eca1 100644 --- a/substrate/frame/election-provider-multi-phase/src/unsigned.rs +++ b/substrate/frame/election-provider-multi-phase/src/unsigned.rs @@ -1445,7 +1445,7 @@ mod tests { ) .unwrap(); let solution = RawSolution { solution: raw, score, round: MultiPhase::round() }; - // 12 is not better than 12. We need score of atleast 13 to be accepted. + // 12 is not better than 12. We need score of at least 13 to be accepted. assert_eq!(solution.score.minimal_stake, 12); // submitting this will panic. assert_noop!( @@ -1483,7 +1483,7 @@ mod tests { )); // trial 4: a solution who's minimal stake is 17, i.e. 4 better than the last - // soluton. + // solution. let result = ElectionResult { winners: vec![(10, 12)], assignments: vec![ diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs index 53bff50f748..83083c91209 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs @@ -208,8 +208,8 @@ fn enters_emergency_phase_after_forcing_before_elect() { /// active validators. 
Thus, slashing a percentage of the current validators that is lower than /// `OffendingValidatorsThreshold` will never force a new era. However, as the slashes progress, if /// the subsequent elections do not meet the minimum election untrusted score, the election will -/// fail and enter in emenergency mode. -fn continous_slashes_below_offending_threshold() { +/// fail and enter emergency mode. +fn continuous_slashes_below_offending_threshold() { let staking_builder = StakingExtBuilder::default().validator_count(10); let epm_builder = EpmExtBuilder::default().disable_emergency_throttling(); @@ -323,7 +323,7 @@ fn set_validation_intention_after_chilled() { } #[test] -/// Active ledger balance may fall below ED if account chills before unbounding. +/// Active ledger balance may fall below ED if account chills before unbonding. /// /// Unbonding call fails if the remaining ledger's stash balance falls below the existential /// deposit. However, if the stash is chilled before unbonding, the ledger's active balance may @@ -350,7 +350,7 @@ fn ledger_consistency_active_balance_below_ed() { // however, chilling works as expected. assert_ok!(Staking::chill(RuntimeOrigin::signed(11))); - // now unbonding the full active balance works, since remainer of the active balance is + // now unbonding the full active balance works, since remainder of the active balance is // not enforced to be below `MinNominatorBond` if the stash has been chilled. assert_ok!(Staking::unbond(RuntimeOrigin::signed(11), 1000)); @@ -479,7 +479,7 @@ fn automatic_unbonding_pools() { staking_events(), [ // auto-withdraw happened as expected to release 2's unbonding funds, but the funds - // were not transfered to 2 and stay in the pool's tranferrable balance instead. + // were not transferred to 2 and stay in the pool's transferrable balance instead. pallet_staking::Event::Withdrawn { stash: 7939698191839293293, amount: 10 }, pallet_staking::Event::Unbonded { stash: 7939698191839293293, amount: 10 } ] diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs index d7d2006d219..7efcc4701e2 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs @@ -167,7 +167,7 @@ parameter_types! { pub static SignedPhase: BlockNumber = 10; pub static UnsignedPhase: BlockNumber = 10; // we expect a minimum of 3 blocks in signed phase and unsigned phases before trying - // enetering in emergency phase after the election failed. + // entering the emergency phase after the election failed. pub static MinBlocksBeforeEmergency: BlockNumber = 3; pub static MaxActiveValidators: u32 = 1000; pub static OffchainRepeat: u32 = 5; @@ -661,7 +661,7 @@ pub fn roll_to(n: BlockNumber, delay_solution: bool) { Session::on_initialize(b); Timestamp::set_timestamp(System::block_number() * BLOCK_TIME + INIT_TIMESTAMP); - // TODO(gpestana): implement a realistic OCW worker insted of simulating it + // TODO(gpestana): implement a realistic OCW worker instead of simulating it // https://github.com/paritytech/substrate/issues/13589 // if there's no solution queued and the solution should not be delayed, try mining and // queue a solution.
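The `roll_to` helper touched just above is the pattern these e2e tests rely on: advance the chain one block at a time, firing each subsystem's `on_initialize` hook so that sessions, timestamps, and election phases progress as they would on a live chain. Below is a minimal, standalone Rust sketch of that loop, not the pallet's actual mock runtime; the `Hook` trait, both mock types, and the 6-second block time are assumptions made purely for illustration.

// A standalone model of the block-driven test loop: every pallet-like
// component implements a hook that runs once per block.
trait Hook {
    fn on_initialize(&mut self, block: u64);
}

// Rotates the "session" every PERIOD blocks, mirroring a period-based config.
struct MockSession {
    rotations: u64,
}

impl Hook for MockSession {
    fn on_initialize(&mut self, block: u64) {
        const PERIOD: u64 = 10;
        if block % PERIOD == 0 {
            self.rotations += 1;
        }
    }
}

// Tracks a wall-clock timestamp derived from the block number.
struct MockTimestamp {
    now: u64,
}

impl Hook for MockTimestamp {
    fn on_initialize(&mut self, block: u64) {
        const BLOCK_TIME: u64 = 6_000; // 6s blocks; an assumption for the sketch
        self.now = block * BLOCK_TIME;
    }
}

// Advance from `from` (exclusive) to `to` (inclusive), firing every hook
// once per block, in registration order.
fn roll_to(hooks: &mut [&mut dyn Hook], from: u64, to: u64) {
    for block in (from + 1)..=to {
        for hook in hooks.iter_mut() {
            hook.on_initialize(block);
        }
    }
}

fn main() {
    let mut session = MockSession { rotations: 0 };
    let mut timestamp = MockTimestamp { now: 0 };
    roll_to(&mut [&mut session, &mut timestamp], 0, 25);
    assert_eq!(session.rotations, 2); // rotated at blocks 10 and 20
    assert_eq!(timestamp.now, 25 * 6_000); // follows the last processed block
}

In the real mock above, `Session::on_initialize(b)` and `Timestamp::set_timestamp(..)` play the roles of the two hooks.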
diff --git a/substrate/frame/election-provider-support/src/bounds.rs b/substrate/frame/election-provider-support/src/bounds.rs index b9ae21e49ca..6b2423b7fec 100644 --- a/substrate/frame/election-provider-support/src/bounds.rs +++ b/substrate/frame/election-provider-support/src/bounds.rs @@ -62,7 +62,7 @@ use sp_runtime::traits::Zero; /// Encapsulates the counting of things that can be bounded in an election, such as voters, /// targets or anything else. /// -/// This struct is defined mostly to prevent callers from mistankingly using `CountBound` instead of +/// This struct is defined mostly to prevent callers from mistakenly using `CountBound` instead of /// `SizeBound` and vice-versa. #[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)] pub struct CountBound(pub u32); @@ -96,7 +96,7 @@ impl Zero for CountBound { /// logic and implementation, but it most likely will represent bytes in SCALE encoding in this /// context. /// -/// This struct is defined mostly to prevent callers from mistankingly using `CountBound` instead of +/// This struct is defined mostly to prevent callers from mistakenly using `CountBound` instead of /// `SizeBound` and vice-versa. #[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)] pub struct SizeBound(pub u32); diff --git a/substrate/frame/examples/offchain-worker/src/lib.rs b/substrate/frame/examples/offchain-worker/src/lib.rs index d21c8b4cfd2..0a90e896188 100644 --- a/substrate/frame/examples/offchain-worker/src/lib.rs +++ b/substrate/frame/examples/offchain-worker/src/lib.rs @@ -495,7 +495,7 @@ impl Pallet { // Here we showcase two ways to send an unsigned transaction / unsigned payload (raw) // // By default unsigned transactions are disallowed, so we need to whitelist this case - // by writing `UnsignedValidator`. Note that it's EXTREMELY important to carefuly + // by writing `UnsignedValidator`. Note that it's EXTREMELY important to carefully // implement unsigned validation logic, as any mistakes can lead to opening DoS or spam // attack vectors. See validation logic docs for more details. // diff --git a/substrate/frame/examples/single-block-migrations/src/lib.rs b/substrate/frame/examples/single-block-migrations/src/lib.rs index 86a9e5d6e95..b36d5262267 100644 --- a/substrate/frame/examples/single-block-migrations/src/lib.rs +++ b/substrate/frame/examples/single-block-migrations/src/lib.rs @@ -20,7 +20,7 @@ //! An example pallet demonstrating best-practices for writing single-block migrations in the //! context of upgrading pallet storage. //! -//! ## Forwarning +//! ## Forewarning //! //! Single block migrations **MUST** execute in a single block, therefore when executed on a //! parachain are only appropriate when guaranteed to not exceed block weight limits. If a @@ -66,7 +66,7 @@ //! //! ## Adding a migration module //! -//! Writing a pallets migrations in a seperate module is strongly recommended. +//! Writing a pallet's migrations in a separate module is strongly recommended. //! //! Here's how the migration module is defined for this pallet: //!
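The `CountBound`/`SizeBound` structs whose docs are fixed at the top of this hunk use the classic newtype technique: two zero-cost wrappers around `u32` that the type checker keeps apart, so an element count can never be passed where a byte size is expected. Below is a minimal sketch of that idea with illustrative names only; the `fits` helper is hypothetical and not part of the crate's API.

// Two zero-cost wrappers around u32; mixing them up is a compile error,
// which is exactly the protection the bounds.rs doc comments describe.
#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)]
struct CountBound(pub u32);

#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)]
struct SizeBound(pub u32);

// Hypothetical check that a snapshot respects both an item-count limit and
// a byte-size limit; each parameter only accepts the matching newtype.
fn fits(voters: CountBound, max_voters: CountBound, encoded: SizeBound, max_size: SizeBound) -> bool {
    voters <= max_voters && encoded <= max_size
}

fn main() {
    let voters = CountBound(1_000);
    let max_voters = CountBound(10_000);
    let encoded = SizeBound(4_096); // bytes, e.g. a SCALE-encoded length
    let max_size = SizeBound(65_536);

    assert!(fits(voters, max_voters, encoded, max_size));
    // fits(encoded, max_voters, voters, max_size); // rejected by the compiler
}

Deriving `PartialOrd`/`Ord` keeps comparisons ergonomic while the wrappers preserve the compile-time distinction at no runtime cost.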
diff --git a/substrate/frame/examples/split/Cargo.toml b/substrate/frame/examples/split/Cargo.toml index d140fc3eef4..230dc980b1a 100644 --- a/substrate/frame/examples/split/Cargo.toml +++ b/substrate/frame/examples/split/Cargo.toml @@ -6,7 +6,7 @@ edition.workspace = true license = "MIT-0" homepage = "https://substrate.io" repository.workspace = true -description = "FRAME example splitted pallet" +description = "FRAME example split pallet" readme = "README.md" [lints] diff --git a/substrate/frame/examples/tasks/Cargo.toml b/substrate/frame/examples/tasks/Cargo.toml index 41521114366..4d14bf313d7 100644 --- a/substrate/frame/examples/tasks/Cargo.toml +++ b/substrate/frame/examples/tasks/Cargo.toml @@ -5,7 +5,7 @@ authors.workspace = true edition.workspace = true license.workspace = true repository.workspace = true -description = "Pallet to demonstrate the usage of Tasks to recongnize and execute service work" +description = "Pallet to demonstrate the usage of Tasks to recognize and execute service work" [lints] workspace = true diff --git a/substrate/frame/fast-unstake/src/tests.rs b/substrate/frame/fast-unstake/src/tests.rs index b19fe3b8c46..77128872f28 100644 --- a/substrate/frame/fast-unstake/src/tests.rs +++ b/substrate/frame/fast-unstake/src/tests.rs @@ -110,7 +110,7 @@ fn cannot_register_if_head() { stashes: bounded_vec![(1, Deposit::get())], checked: bounded_vec![], }); - // Controller attempts to regsiter + // Controller attempts to register assert_noop!( FastUnstake::register_fast_unstake(RuntimeOrigin::signed(1)), Error::::AlreadyHead diff --git a/substrate/frame/grandpa/src/benchmarking.rs b/substrate/frame/grandpa/src/benchmarking.rs index 7a87f0c4b07..c89592b3b35 100644 --- a/substrate/frame/grandpa/src/benchmarking.rs +++ b/substrate/frame/grandpa/src/benchmarking.rs @@ -29,7 +29,7 @@ benchmarks! { // NOTE: generated with the test below `test_generate_equivocation_report_blob`. // the output should be deterministic since the keys we use are static. // with the current benchmark setup it is not possible to generate this - // programatically from the benchmark setup. + // programmatically from the benchmark setup. const EQUIVOCATION_PROOF_BLOB: [u8; 257] = [ 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 136, 220, 52, 23, 213, 5, 142, 196, 180, 80, 62, 12, 18, 234, 26, 10, 137, 190, 32, diff --git a/substrate/frame/grandpa/src/tests.rs b/substrate/frame/grandpa/src/tests.rs index 993d72af6d4..8b12d63adaa 100644 --- a/substrate/frame/grandpa/src/tests.rs +++ b/substrate/frame/grandpa/src/tests.rs @@ -634,7 +634,7 @@ fn report_equivocation_invalid_equivocation_proof() { (1, H256::zero(), 10, &equivocation_keyring), )); - // votes targetting different rounds, there is no equivocation. + // votes targeting different rounds, there is no equivocation. 
assert_invalid_equivocation_proof(generate_equivocation_proof( set_id, (1, H256::random(), 10, &equivocation_keyring), diff --git a/substrate/frame/identity/src/benchmarking.rs b/substrate/frame/identity/src/benchmarking.rs index e0b45eeecd1..cdcdb952261 100644 --- a/substrate/frame/identity/src/benchmarking.rs +++ b/substrate/frame/identity/src/benchmarking.rs @@ -671,10 +671,10 @@ mod benchmarks { let username = bounded_username::(bench_username(), bench_suffix()); Identity::::queue_acceptance(&caller, username.clone()); - let expected_exiration = + let expected_expiration = frame_system::Pallet::::block_number() + T::PendingUsernameExpiration::get(); - run_to_block::(expected_exiration + One::one()); + run_to_block::(expected_expiration + One::one()); #[extrinsic_call] _(RawOrigin::Signed(caller.clone()), username); diff --git a/substrate/frame/identity/src/lib.rs b/substrate/frame/identity/src/lib.rs index 78d59180b3f..4a977880b31 100644 --- a/substrate/frame/identity/src/lib.rs +++ b/substrate/frame/identity/src/lib.rs @@ -161,7 +161,7 @@ pub mod pallet { /// Structure holding information about an identity. type IdentityInformation: IdentityInformationProvider; - /// Maxmimum number of registrars allowed in the system. Needed to bound the complexity + /// Maximum number of registrars allowed in the system. Needed to bound the complexity /// of, e.g., updating judgements. #[pallet::constant] type MaxRegistrars: Get; diff --git a/substrate/frame/im-online/src/tests.rs b/substrate/frame/im-online/src/tests.rs index 5e5212e1d56..f9959593494 100644 --- a/substrate/frame/im-online/src/tests.rs +++ b/substrate/frame/im-online/src/tests.rs @@ -416,7 +416,7 @@ fn should_handle_non_linear_session_progress() { Session::rotate_session(); - // if we don't have valid results for the current session progres then + // if we don't have valid results for the current session progress then // we'll fallback to `HeartbeatAfter` and only heartbeat on block 5. MockCurrentSessionProgress::mutate(|p| *p = Some(None)); assert_eq!(ImOnline::send_heartbeats(2).err(), Some(OffchainErr::TooEarly)); diff --git a/substrate/frame/merkle-mountain-range/src/lib.rs b/substrate/frame/merkle-mountain-range/src/lib.rs index e3eb0dc8cf5..7b6edb37b7f 100644 --- a/substrate/frame/merkle-mountain-range/src/lib.rs +++ b/substrate/frame/merkle-mountain-range/src/lib.rs @@ -89,7 +89,7 @@ mod tests; /// is not available (since the block is not finished yet), /// we use the `parent_hash` here along with parent block number. pub struct ParentNumberAndHash { - _phanthom: sp_std::marker::PhantomData, + _phantom: sp_std::marker::PhantomData, } impl LeafDataProvider for ParentNumberAndHash { diff --git a/substrate/frame/message-queue/src/lib.rs b/substrate/frame/message-queue/src/lib.rs index 61028057394..93cd760eeb9 100644 --- a/substrate/frame/message-queue/src/lib.rs +++ b/substrate/frame/message-queue/src/lib.rs @@ -1460,7 +1460,7 @@ impl Pallet { /// Run a closure that errors on re-entrance. Meant to be used by anything that services queues. pub(crate) fn with_service_mutex R, R>(f: F) -> Result { - // Holds the singelton token instance. + // Holds the singleton token instance. 
environmental::environmental!(token: Option<()>); token::using_once(&mut Some(()), || { diff --git a/substrate/frame/message-queue/src/tests.rs b/substrate/frame/message-queue/src/tests.rs index dbef94b3b10..1f6e7777f01 100644 --- a/substrate/frame/message-queue/src/tests.rs +++ b/substrate/frame/message-queue/src/tests.rs @@ -90,7 +90,7 @@ fn queue_priority_retains() { MessageQueue::enqueue_message(msg("d"), Everywhere(2)); assert_ring(&[Everywhere(1), Everywhere(2), Everywhere(3)]); // service head is 1, it will process a, leaving service head at 2. it also processes b but - // doees not empty queue 2, so service head will end at 2. + // does not empty queue 2, so service head will end at 2. assert_eq!(MessageQueue::service_queues(2.into_weight()), 2.into_weight()); assert_eq!( MessagesProcessed::take(), diff --git a/substrate/frame/migrations/src/mock_helpers.rs b/substrate/frame/migrations/src/mock_helpers.rs index c5e23efb4e3..995ec0a922c 100644 --- a/substrate/frame/migrations/src/mock_helpers.rs +++ b/substrate/frame/migrations/src/mock_helpers.rs @@ -107,7 +107,7 @@ impl SteppedMigrations for MockedMigrations { cursor: Option>, meter: &mut WeightMeter, ) -> Option>, SteppedMigrationError>> { - // This is a hack but should be fine. We dont need it in testing. + // This is a hack but should be fine. We don't need it in testing. Self::nth_step(n, cursor, meter) } diff --git a/substrate/frame/nis/README.md b/substrate/frame/nis/README.md index 032df7d0186..8a1a30f17e1 100644 --- a/substrate/frame/nis/README.md +++ b/substrate/frame/nis/README.md @@ -1,5 +1,5 @@ # NIS Module -Provides a non-interactiove variant of staking. +Provides a non-interactive variant of staking. License: Apache-2.0 diff --git a/substrate/frame/nis/src/lib.rs b/substrate/frame/nis/src/lib.rs index 5e547b63e54..7655cd1a824 100644 --- a/substrate/frame/nis/src/lib.rs +++ b/substrate/frame/nis/src/lib.rs @@ -426,7 +426,7 @@ pub mod pallet { }, /// An automatic funding of the deficit was made. Funded { deficit: BalanceOf }, - /// A receipt was transfered. + /// A receipt was transferred. Transferred { from: T::AccountId, to: T::AccountId, index: ReceiptIndex }, } @@ -457,7 +457,7 @@ pub mod pallet { AlreadyFunded, /// The thaw throttle has been reached for this period. Throttled, - /// The operation would result in a receipt worth an insignficant value. + /// The operation would result in a receipt worth an insignificant value. MakesDust, /// The receipt is already communal. AlreadyCommunal, diff --git a/substrate/frame/nis/src/tests.rs b/substrate/frame/nis/src/tests.rs index 7350da97dc6..01724999ae7 100644 --- a/substrate/frame/nis/src/tests.rs +++ b/substrate/frame/nis/src/tests.rs @@ -414,7 +414,7 @@ fn thaw_respects_transfers() { assert_eq!(Balances::reserved_balance(&1), 0); assert_eq!(Balances::reserved_balance(&2), 40); - // Transfering the receipt... + // Transferring the receipt... assert_noop!(Nis::thaw_private(signed(1), 0, None), Error::::NotOwner); // ...and thawing is possible. diff --git a/substrate/frame/node-authorization/src/lib.rs b/substrate/frame/node-authorization/src/lib.rs index 8a823d29f23..9019a863ad8 100644 --- a/substrate/frame/node-authorization/src/lib.rs +++ b/substrate/frame/node-authorization/src/lib.rs @@ -18,7 +18,7 @@ //! # Node authorization pallet //! //! This pallet manages a configurable set of nodes for a permissioned network. -//! Each node is dentified by a PeerId (i.e. `Vec`). It provides two ways to +//! Each node is identified by a PeerId (i.e. `Vec`). 
It provides two ways to //! authorize a node, //! //! - a set of well known nodes across different organizations in which the @@ -102,7 +102,7 @@ pub mod pallet { #[pallet::getter(fn owners)] pub type Owners = StorageMap<_, Blake2_128Concat, PeerId, T::AccountId>; - /// The additional adapative connections of each node. + /// The additional adaptive connections of each node. #[pallet::storage] #[pallet::getter(fn additional_connection)] pub type AdditionalConnections = @@ -161,7 +161,7 @@ pub mod pallet { NotClaimed, /// You are not the owner of the node. NotOwner, - /// No permisson to perform specific operation. + /// No permission to perform specific operation. PermissionDenied, } @@ -377,7 +377,7 @@ pub mod pallet { /// Add additional connections to a given node. /// /// - `node`: identifier of the node. - /// - `connections`: additonal nodes from which the connections are allowed. + /// - `connections`: additional nodes from which the connections are allowed. #[pallet::call_index(7)] #[pallet::weight(T::WeightInfo::add_connections())] pub fn add_connections( @@ -412,7 +412,7 @@ pub mod pallet { /// Remove additional connections of a given node. /// /// - `node`: identifier of the node. - /// - `connections`: additonal nodes from which the connections are not allowed anymore. + /// - `connections`: additional nodes from which the connections are not allowed anymore. #[pallet::call_index(8)] #[pallet::weight(T::WeightInfo::remove_connections())] pub fn remove_connections( diff --git a/substrate/frame/nomination-pools/src/lib.rs b/substrate/frame/nomination-pools/src/lib.rs index a0dbdb3aaa4..f29a49a2b1b 100644 --- a/substrate/frame/nomination-pools/src/lib.rs +++ b/substrate/frame/nomination-pools/src/lib.rs @@ -451,7 +451,7 @@ enum AccountType { /// The permission a pool member can set for other accounts to claim rewards on their behalf. #[derive(Encode, Decode, MaxEncodedLen, Clone, Copy, Debug, PartialEq, Eq, TypeInfo)] pub enum ClaimPermission { - /// Only the pool member themself can claim their rewards. + /// Only the pool member themselves can claim their rewards. Permissioned, /// Anyone can compound rewards on a pool member's behalf. PermissionlessCompound, @@ -2066,7 +2066,7 @@ pub mod pallet { /// The member will earn rewards pro rata based on the members stake vs the sum of the /// members in the pools stake. Rewards do not "expire". /// - /// See `claim_payout_other` to caim rewards on bahalf of some `other` pool member. + /// See `claim_payout_other` to claim rewards on behalf of some `other` pool member. #[pallet::call_index(2)] #[pallet::weight(T::WeightInfo::claim_payout())] pub fn claim_payout(origin: OriginFor) -> DispatchResult { @@ -2797,7 +2797,7 @@ pub mod pallet { /// Set or remove a pool's commission claim permission. /// /// Determines who can claim the pool's pending commission. Only the `Root` role of the pool - /// is able to conifigure commission claim permissions. + /// is able to configure commission claim permissions. #[pallet::call_index(22)] #[pallet::weight(T::WeightInfo::set_commission_claim_permission())] pub fn set_commission_claim_permission( @@ -2836,7 +2836,7 @@ pub mod pallet { assert!( T::Staking::bonding_duration() < TotalUnbondingPools::::get(), "There must be more unbonding pools then the bonding duration / - so a slash can be applied to relevant unboding pools. (We assume / + so a slash can be applied to relevant unbonding pools. 
(We assume / the bonding duration > slash deffer duration.", ); } diff --git a/substrate/frame/nomination-pools/src/migration.rs b/substrate/frame/nomination-pools/src/migration.rs index 14410339f59..ca9c0874a83 100644 --- a/substrate/frame/nomination-pools/src/migration.rs +++ b/substrate/frame/nomination-pools/src/migration.rs @@ -70,7 +70,7 @@ pub mod unversioned { fn on_runtime_upgrade() -> Weight { let migrated = BondedPools::::count(); - // recalcuate the `TotalValueLocked` to compare with the current on-chain TVL which may + // recalculate the `TotalValueLocked` to compare with the current on-chain TVL which may // be out of sync. let tvl: BalanceOf = helpers::calculate_tvl_by_total_stake::(); let onchain_tvl = TotalValueLocked::::get(); diff --git a/substrate/frame/offences/src/tests.rs b/substrate/frame/offences/src/tests.rs index d525c7c3ab1..4897b78f3e4 100644 --- a/substrate/frame/offences/src/tests.rs +++ b/substrate/frame/offences/src/tests.rs @@ -204,7 +204,7 @@ fn reports_if_an_offence_is_dup() { assert_eq!(Offences::report_offence(vec![], test_offence.clone()), Ok(())); // creating a new offence for the same authorities on the next slot - // should be considered a new offence and thefore not known + // should be considered a new offence and therefore not known let test_offence_next_slot = offence(time_slot + 1, vec![0, 1]); assert!(!>::is_known_offence( &test_offence_next_slot.offenders, diff --git a/substrate/frame/paged-list/src/paged_list.rs b/substrate/frame/paged-list/src/paged_list.rs index 75467f3ceeb..eecc728cd62 100644 --- a/substrate/frame/paged-list/src/paged_list.rs +++ b/substrate/frame/paged-list/src/paged_list.rs @@ -190,7 +190,7 @@ impl Page { let values = sp_io::storage::get(&key) .and_then(|raw| sp_std::vec::Vec::::decode(&mut &raw[..]).ok())?; if values.is_empty() { - // Dont create empty pages. + // Don't create empty pages. return None } let values = values.into_iter().skip(value_index as usize); diff --git a/substrate/frame/parameters/src/lib.rs b/substrate/frame/parameters/src/lib.rs index 3e54eb6d67f..55a6f1ff91d 100644 --- a/substrate/frame/parameters/src/lib.rs +++ b/substrate/frame/parameters/src/lib.rs @@ -122,7 +122,7 @@ use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; use frame_support::traits::{ - dynamic_params::{AggregratedKeyValue, IntoKey, Key, RuntimeParameterStore, TryIntoKey}, + dynamic_params::{AggregatedKeyValue, IntoKey, Key, RuntimeParameterStore, TryIntoKey}, EnsureOriginWithArg, }; @@ -135,10 +135,10 @@ pub use pallet::*; pub use weights::WeightInfo; /// The key type of a parameter. -type KeyOf = <::RuntimeParameters as AggregratedKeyValue>::Key; +type KeyOf = <::RuntimeParameters as AggregatedKeyValue>::Key; /// The value type of a parameter. -type ValueOf = <::RuntimeParameters as AggregratedKeyValue>::Value; +type ValueOf = <::RuntimeParameters as AggregatedKeyValue>::Value; #[frame_support::pallet] pub mod pallet { @@ -154,7 +154,7 @@ pub mod pallet { /// /// Usually created by [`frame_support::dynamic_params`] or equivalent. #[pallet::no_default_bounds] - type RuntimeParameters: AggregratedKeyValue; + type RuntimeParameters: AggregatedKeyValue; /// The origin which may update a parameter. /// @@ -175,11 +175,11 @@ pub mod pallet { /// Is also emitted when the value was not changed. Updated { /// The key that was updated. - key: ::Key, + key: ::Key, /// The old value before this call. - old_value: Option<::Value>, + old_value: Option<::Value>, /// The new value after this call. 
- new_value: Option<::Value>, + new_value: Option<::Value>, }, } @@ -245,23 +245,23 @@ pub mod pallet { } impl RuntimeParameterStore for Pallet { - type AggregratedKeyValue = T::RuntimeParameters; + type AggregatedKeyValue = T::RuntimeParameters; fn get(key: K) -> Option where - KV: AggregratedKeyValue, - K: Key + Into<::Key>, - ::Key: IntoKey< - <::AggregratedKeyValue as AggregratedKeyValue>::Key, + KV: AggregatedKeyValue, + K: Key + Into<::Key>, + ::Key: IntoKey< + <::AggregatedKeyValue as AggregatedKeyValue>::Key, >, - <::AggregratedKeyValue as AggregratedKeyValue>::Value: - TryIntoKey<::Value>, - ::Value: TryInto, + <::AggregatedKeyValue as AggregatedKeyValue>::Value: + TryIntoKey<::Value>, + ::Value: TryInto, { - let key: ::Key = key.into(); + let key: ::Key = key.into(); let val = Parameters::::get(key.into_key()); val.and_then(|v| { - let val: ::Value = v.try_into_key().ok()?; + let val: ::Value = v.try_into_key().ok()?; let val: K::WrappedValue = val.try_into().ok()?; let val = val.into(); Some(val) diff --git a/substrate/frame/parameters/src/tests/unit.rs b/substrate/frame/parameters/src/tests/unit.rs index d3f11ba9640..d811a835646 100644 --- a/substrate/frame/parameters/src/tests/unit.rs +++ b/substrate/frame/parameters/src/tests/unit.rs @@ -25,7 +25,7 @@ use crate::tests::mock::{ RuntimeParametersValue, }; use codec::Encode; -use frame_support::{assert_noop, assert_ok, traits::dynamic_params::AggregratedKeyValue}; +use frame_support::{assert_noop, assert_ok, traits::dynamic_params::AggregatedKeyValue}; use sp_core::Get; use sp_runtime::DispatchError; diff --git a/substrate/frame/proxy/src/lib.rs b/substrate/frame/proxy/src/lib.rs index 4d4da0433af..2b3fac5f59e 100644 --- a/substrate/frame/proxy/src/lib.rs +++ b/substrate/frame/proxy/src/lib.rs @@ -119,7 +119,7 @@ pub mod pallet { /// The currency mechanism. type Currency: ReservableCurrency; - /// A kind of proxy; specified with the proxy and passed in to the `IsProxyable` fitler. + /// A kind of proxy; specified with the proxy and passed in to the `IsProxyable` filter. /// The instance filter determines whether a given call may be proxied under this type. /// /// IMPORTANT: `Default` must be provided and MUST BE the the *most permissive* value. diff --git a/substrate/frame/root-testing/src/lib.rs b/substrate/frame/root-testing/src/lib.rs index 51fd835409a..98e1f5c5b66 100644 --- a/substrate/frame/root-testing/src/lib.rs +++ b/substrate/frame/root-testing/src/lib.rs @@ -17,7 +17,7 @@ //! # Root Testing Pallet //! -//! Pallet that contains extrinsics that can be usefull in testing. +//! Pallet that contains extrinsics that can be useful in testing. //! //! NOTE: This pallet should only be used for testing purposes and should not be used in production //! runtimes! diff --git a/substrate/frame/salary/src/tests/integration.rs b/substrate/frame/salary/src/tests/integration.rs index 26afb2d8aba..124ab38c565 100644 --- a/substrate/frame/salary/src/tests/integration.rs +++ b/substrate/frame/salary/src/tests/integration.rs @@ -250,7 +250,7 @@ fn swap_exhaustive_works() { }); assert_eq!(root_add, root_swap); - // Ensure that we dont compare trivial stuff like `()` from a type error above. + // Ensure that we don't compare trivial stuff like `()` from a type error above. 
assert_eq!(root_add.len(), 32); }); } diff --git a/substrate/frame/salary/src/tests/unit.rs b/substrate/frame/salary/src/tests/unit.rs index 5e308354116..db1c8b947ef 100644 --- a/substrate/frame/salary/src/tests/unit.rs +++ b/substrate/frame/salary/src/tests/unit.rs @@ -508,7 +508,7 @@ fn zero_payment_fails() { } #[test] -fn unregistered_bankrupcy_fails_gracefully() { +fn unregistered_bankruptcy_fails_gracefully() { new_test_ext().execute_with(|| { assert_ok!(Salary::init(RuntimeOrigin::signed(1))); set_rank(1, 2); @@ -532,7 +532,7 @@ fn unregistered_bankrupcy_fails_gracefully() { } #[test] -fn registered_bankrupcy_fails_gracefully() { +fn registered_bankruptcy_fails_gracefully() { new_test_ext().execute_with(|| { assert_ok!(Salary::init(RuntimeOrigin::signed(1))); set_rank(1, 2); @@ -561,7 +561,7 @@ fn registered_bankrupcy_fails_gracefully() { } #[test] -fn mixed_bankrupcy_fails_gracefully() { +fn mixed_bankruptcy_fails_gracefully() { new_test_ext().execute_with(|| { assert_ok!(Salary::init(RuntimeOrigin::signed(1))); set_rank(1, 2); @@ -589,7 +589,7 @@ fn mixed_bankrupcy_fails_gracefully() { } #[test] -fn other_mixed_bankrupcy_fails_gracefully() { +fn other_mixed_bankruptcy_fails_gracefully() { new_test_ext().execute_with(|| { assert_ok!(Salary::init(RuntimeOrigin::signed(1))); set_rank(1, 2); diff --git a/substrate/frame/sassafras/src/benchmarking.rs b/substrate/frame/sassafras/src/benchmarking.rs index 1c9626ad260..2b2467c6f84 100644 --- a/substrate/frame/sassafras/src/benchmarking.rs +++ b/substrate/frame/sassafras/src/benchmarking.rs @@ -84,7 +84,7 @@ mod benchmarks { // - load the full ring context. // - recompute the ring verifier. // - sorting the epoch tickets in one shot - // (here we account for the very unluky scenario where we haven't done any sort work yet) + // (here we account for the very unlucky scenario where we haven't done any sort work yet) // - pending epoch change config. // // For this bench we assume a redundancy factor of 2 (suggested value to be used in prod). diff --git a/substrate/frame/sassafras/src/lib.rs b/substrate/frame/sassafras/src/lib.rs index 0ee8657489b..8cbf1e47e32 100644 --- a/substrate/frame/sassafras/src/lib.rs +++ b/substrate/frame/sassafras/src/lib.rs @@ -234,7 +234,7 @@ pub mod pallet { /// Epoch X first N-th ticket has key (X mod 2, N) /// /// Note that the ticket's index doesn't directly correspond to the slot index within the epoch. - /// The assigment is computed dynamically using an *outside-in* strategy. + /// The assignment is computed dynamically using an *outside-in* strategy. /// /// Be aware that entries within this map are never removed, only overwritten. /// Last element index should be fetched from the [`TicketsMeta`] value. @@ -465,7 +465,7 @@ pub mod pallet { /// Plan an epoch configuration change. /// - /// The epoch configuration change is recorded and will be announced at the begining + /// The epoch configuration change is recorded and will be announced at the beginning /// of the next epoch together with next epoch authorities information. /// In other words, the configuration will be enacted one epoch later. /// @@ -758,11 +758,11 @@ impl Pallet { let randomness = hashing::blake2_256(buf.as_slice()); RandomnessAccumulator::::put(randomness); - let next_randoness = Self::update_epoch_randomness(1); + let next_randomness = Self::update_epoch_randomness(1); // Deposit a log as this is the first block in first epoch. 
let next_epoch = NextEpochDescriptor { - randomness: next_randoness, + randomness: next_randomness, authorities: Self::next_authorities().into_inner(), config: None, }; diff --git a/substrate/frame/scheduler/README.md b/substrate/frame/scheduler/README.md index 6aec2ddb0e4..5e233fdbdb0 100644 --- a/substrate/frame/scheduler/README.md +++ b/substrate/frame/scheduler/README.md @@ -16,7 +16,7 @@ for the origin: namely `frame_system::Config::BaseCallFilter` for all origin except root which will get no filter. And not the filter contained in origin use to call `fn schedule`. -If a call is scheduled using proxy or whatever mecanism which adds filter, +If a call is scheduled using proxy or whatever mechanism which adds filter, then those filter will not be used when dispatching the schedule call. ## Interface diff --git a/substrate/frame/scheduler/src/benchmarking.rs b/substrate/frame/scheduler/src/benchmarking.rs index 18441d54b39..884f7800038 100644 --- a/substrate/frame/scheduler/src/benchmarking.rs +++ b/substrate/frame/scheduler/src/benchmarking.rs @@ -211,7 +211,7 @@ benchmarks! { } verify { } - // `execute_dispatch` when the origin is `Signed`, not counting the dispatable's weight. + // `execute_dispatch` when the origin is `Signed`, not counting the dispatchable's weight. execute_dispatch_signed { let mut counter = WeightMeter::new(); let origin = make_origin::(true); @@ -222,7 +222,7 @@ benchmarks! { verify { } - // `execute_dispatch` when the origin is not `Signed`, not counting the dispatable's weight. + // `execute_dispatch` when the origin is not `Signed`, not counting the dispatchable's weight. execute_dispatch_unsigned { let mut counter = WeightMeter::new(); let origin = make_origin::(false); diff --git a/substrate/frame/scheduler/src/lib.rs b/substrate/frame/scheduler/src/lib.rs index af3abd8ac4f..62417b8d2cc 100644 --- a/substrate/frame/scheduler/src/lib.rs +++ b/substrate/frame/scheduler/src/lib.rs @@ -46,7 +46,7 @@ //! 1. Scheduling a runtime call at a specific block. #![doc = docify::embed!("src/tests.rs", basic_scheduling_works)] //! -//! 2. Scheduling a preimage hash of a runtime call at a specifc block +//! 2. Scheduling a preimage hash of a runtime call at a specific block #![doc = docify::embed!("src/tests.rs", scheduling_with_preimages_works)] //! diff --git a/substrate/frame/scheduler/src/tests.rs b/substrate/frame/scheduler/src/tests.rs index 1ed2ca9e2f3..bb02320ad75 100644 --- a/substrate/frame/scheduler/src/tests.rs +++ b/substrate/frame/scheduler/src/tests.rs @@ -1119,7 +1119,7 @@ fn reschedule_named_works() { } #[test] -fn reschedule_named_perodic_works() { +fn reschedule_named_periodic_works() { new_test_ext().execute_with(|| { let call = RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_parts(10, 0) }); @@ -2980,7 +2980,7 @@ fn reschedule_named_last_task_removes_agenda() { }); } -/// Ensures that an unvailable call sends an event. +/// Ensures that an unavailable call sends an event. #[test] fn unavailable_call_is_detected() { use frame_support::traits::schedule::v3::Named; diff --git a/substrate/frame/society/src/lib.rs b/substrate/frame/society/src/lib.rs index 99dd580a403..5bce245f73f 100644 --- a/substrate/frame/society/src/lib.rs +++ b/substrate/frame/society/src/lib.rs @@ -1509,7 +1509,7 @@ impl, I: 'static> Pallet { // the Founder. 
if MemberCount::::get() > 2 { let defender = next_defender - .or_else(|| Self::pick_defendent(rng)) + .or_else(|| Self::pick_defendant(rng)) .expect("exited if members empty; qed"); let skeptic = Self::pick_member_except(rng, &defender).expect("exited if members empty; qed"); @@ -1871,7 +1871,7 @@ impl, I: 'static> Pallet { /// /// If only the Founder and Head members exist (or the state is inconsistent), then `None` /// may be returned. - fn pick_defendent(rng: &mut impl RngCore) -> Option { + fn pick_defendant(rng: &mut impl RngCore) -> Option { let member_count = MemberCount::::get(); if member_count <= 2 { return None @@ -1976,7 +1976,7 @@ impl, I: 'static> Pallet { /// Transfer some `amount` from the main account into the payouts account and reduce the Pot /// by this amount. fn reserve_payout(amount: BalanceOf) { - // Tramsfer payout from the Pot into the payouts account. + // Transfer payout from the Pot into the payouts account. Pot::::mutate(|pot| pot.saturating_reduce(amount)); // this should never fail since we ensure we can afford the payouts in a previous @@ -1988,7 +1988,7 @@ impl, I: 'static> Pallet { /// Transfer some `amount` from the main account into the payouts account and increase the Pot /// by this amount. fn unreserve_payout(amount: BalanceOf) { - // Tramsfer payout from the Pot into the payouts account. + // Transfer payout from the Pot into the payouts account. Pot::::mutate(|pot| pot.saturating_accrue(amount)); // this should never fail since we ensure we can afford the payouts in a previous diff --git a/substrate/frame/society/src/migrations.rs b/substrate/frame/society/src/migrations.rs index 6a102911451..8fd87b1163a 100644 --- a/substrate/frame/society/src/migrations.rs +++ b/substrate/frame/society/src/migrations.rs @@ -240,7 +240,7 @@ pub fn assert_internal_consistency, I: Instance + 'static>() { pub fn from_original, I: Instance + 'static>( past_payouts: &mut [(::AccountId, BalanceOf)], ) -> Result { - // Migrate Bids from old::Bids (just a trunctation). + // Migrate Bids from old::Bids (just a truncation). Bids::::put(BoundedVec::<_, T::MaxBids>::truncate_from(v0::Bids::::take())); // Initialise round counter. @@ -287,13 +287,13 @@ pub fn from_original, I: Instance + 'static>( .defensive_ok_or("member_count > 0, we must have at least 1 member")?; // Swap the founder with the first member in MemberByIndex. MemberByIndex::::swap(0, member_count); - // Update the indicies of the swapped member MemberRecords. + // Update the indices of the swapped member MemberRecords. 
Members::::mutate(&member, |m| { if let Some(member) = m { member.index = 0; } else { frame_support::defensive!( - "Member somehow disapeared from storage after it was inserted" + "Member somehow disappeared from storage after it was inserted" ); } }); @@ -302,7 +302,7 @@ pub fn from_original, I: Instance + 'static>( member.index = member_count; } else { frame_support::defensive!( - "Member somehow disapeared from storage after it was queried" + "Member somehow disappeared from storage after it was queried" ); } }); diff --git a/substrate/frame/society/src/tests.rs b/substrate/frame/society/src/tests.rs index 411567e1ded..5f8ecc9a7c5 100644 --- a/substrate/frame/society/src/tests.rs +++ b/substrate/frame/society/src/tests.rs @@ -541,7 +541,7 @@ fn suspended_candidate_rejected_works() { assert_ok!(Society::vote(Origin::signed(30), x, true)); } - // Voting continues, as no canidate is clearly accepted yet and the founder chooses not to + // Voting continues, as no candidate is clearly accepted yet and the founder chooses not to // act. conclude_intake(false, None); assert_eq!(members(), vec![10, 20, 30]); diff --git a/substrate/frame/staking/src/ledger.rs b/substrate/frame/staking/src/ledger.rs index 67be1e15bc6..9461daefed6 100644 --- a/substrate/frame/staking/src/ledger.rs +++ b/substrate/frame/staking/src/ledger.rs @@ -130,7 +130,7 @@ impl StakingLedger { // if ledger bond is in a bad state, return error to prevent applying operations that may // further spoil the ledger's state. A bond is in bad state when the bonded controller is - // associted with a different ledger (i.e. a ledger with a different stash). + // associated with a different ledger (i.e. a ledger with a different stash). // // See for more details. ensure!( diff --git a/substrate/frame/staking/src/migrations.rs b/substrate/frame/staking/src/migrations.rs index 3e3b1113a81..d5b18421d5b 100644 --- a/substrate/frame/staking/src/migrations.rs +++ b/substrate/frame/staking/src/migrations.rs @@ -30,7 +30,7 @@ use frame_support::ensure; #[cfg(feature = "try-runtime")] use sp_runtime::TryRuntimeError; -/// Used for release versioning upto v12. +/// Used for release versioning up to v12. /// /// Obsolete from v13. Keeping around to make encoding/decoding of old migration code easier. #[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs index d42456e53b1..407b301fad2 100644 --- a/substrate/frame/staking/src/pallet/impls.rs +++ b/substrate/frame/staking/src/pallet/impls.rs @@ -1906,7 +1906,7 @@ impl Pallet { /// Invariants: /// * A bonded ledger should always have an assigned `Payee`. /// * The number of entries in `Payee` and of bonded staking ledgers *must* match. - /// * The stash account in the ledger must match that of the bonded acount. + /// * The stash account in the ledger must match that of the bonded account. fn check_payees() -> Result<(), TryRuntimeError> { for (stash, _) in Bonded::::iter() { ensure!(Payee::::get(&stash).is_some(), "bonded ledger does not have payee set"); @@ -1947,7 +1947,7 @@ impl Pallet { /// Invariants: /// * `ledger.controller` is not stored in the storage (but populated at retrieval). /// * Stake consistency: ledger.total == ledger.active + sum(ledger.unlocking). 
- /// * The controller keyeing the ledger and the ledger stash matches the state of the `Bonded` + /// * The controller keying the ledger and the ledger stash matches the state of the `Bonded` /// storage. fn check_ledgers() -> Result<(), TryRuntimeError> { Bonded::::iter() diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs index 1bf8bd8b09c..6afbf12032d 100644 --- a/substrate/frame/staking/src/pallet/mod.rs +++ b/substrate/frame/staking/src/pallet/mod.rs @@ -54,7 +54,7 @@ use crate::{ }; // The speculative number of spans are used as an input of the weight annotation of -// [`Call::unbond`], as the post dipatch weight may depend on the number of slashing span on the +// [`Call::unbond`], as the post dispatch weight may depend on the number of slashing span on the // account which is not provided as an input. The value set should be conservative but sensible. pub(crate) const SPECULATIVE_NUM_SPANS: u32 = 32; @@ -1134,7 +1134,7 @@ pub mod pallet { /// this call results in a complete removal of all the data related to the stash account. /// In this case, the `num_slashing_spans` must be larger or equal to the number of /// slashing spans associated with the stash account in the [`SlashingSpans`] storage type, - /// otherwise the call will fail. The call weight is directly propotional to + /// otherwise the call will fail. The call weight is directly proportional to /// `num_slashing_spans`. /// /// ## Complexity @@ -1376,7 +1376,7 @@ pub mod pallet { Ok(()) } - /// Increments the ideal number of validators upto maximum of + /// Increments the ideal number of validators up to maximum of /// `ElectionProviderBase::MaxWinners`. /// /// The dispatch origin must be Root. @@ -1401,7 +1401,7 @@ pub mod pallet { Ok(()) } - /// Scale up the ideal number of validators by a factor upto maximum of + /// Scale up the ideal number of validators by a factor up to maximum of /// `ElectionProviderBase::MaxWinners`. /// /// The dispatch origin must be Root. diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs index 3725c9e3c2c..ef156e19552 100644 --- a/substrate/frame/staking/src/tests.rs +++ b/substrate/frame/staking/src/tests.rs @@ -270,7 +270,7 @@ fn change_controller_works() { assert_eq!(ledger.controller(), Some(stash)); // the raw storage ledger's controller is always `None`. however, we can still fetch the - // correct controller with `ledger.controler()`. + // correct controller with `ledger.controller()`. let raw_ledger = >::get(&stash).unwrap(); assert_eq!(raw_ledger.controller, None); @@ -1257,7 +1257,7 @@ fn bond_extra_controller_bad_state_works() { Bonded::::insert(31, 41); // we confirm that the ledger is in bad state: 31 has 41 as controller and when fetching - // the ledger associated with the controler 41, its stash is 41 (and not 31). + // the ledger associated with the controller 41, its stash is 41 (and not 31). assert_eq!(Ledger::::get(41).unwrap().stash, 41); // if the ledger is in this bad state, the `bond_extra` should fail. @@ -1815,7 +1815,7 @@ fn max_staked_rewards_works() { let total_payout = treasury_payout + validators_payout; // max stakers payout (without max staked rewards cap applied) is larger than the final - // validator rewards. The final payment and remainder should be adjusted by redestributing + // validator rewards. The final payment and remainder should be adjusted by redistributing // the era inflation to apply the cap... 
assert!(max_stakers_payout > validators_payout); @@ -4686,7 +4686,7 @@ fn bond_during_era_does_not_populate_legacy_claimed_rewards() { } ); - // make sure only era upto history depth is stored + // make sure only era up to history depth is stored let current_era = 99; mock::start_active_era(current_era); bond_validator(13, 1000); @@ -5412,7 +5412,7 @@ mod election_data_provider { Event::SnapshotVotersSizeExceeded { size: 75 } ); - // however, if the election voter size bounds were largers, the snapshot would + // however, if the election voter size bounds were larger, the snapshot would // include the electing voters of 70. let bounds = ElectionBoundsBuilder::default().voters_size(1_000.into()).build(); assert_eq!( diff --git a/substrate/frame/sudo/src/lib.rs b/substrate/frame/sudo/src/lib.rs index d8b734e6bbe..63b68e69430 100644 --- a/substrate/frame/sudo/src/lib.rs +++ b/substrate/frame/sudo/src/lib.rs @@ -38,7 +38,7 @@ //! In Substrate blockchains, pallets may contain dispatchable calls that can only be called at //! the system level of the chain (i.e. dispatchables that require a `Root` origin). //! Setting a privileged account, called the _sudo key_, allows you to make such calls as an -//! extrinisic. +//! extrinsic. //! //! Here's an example of a privileged function in another pallet: //! diff --git a/substrate/frame/support/procedural/src/benchmark.rs b/substrate/frame/support/procedural/src/benchmark.rs index ea53ad263a1..0a62c3f92a6 100644 --- a/substrate/frame/support/procedural/src/benchmark.rs +++ b/substrate/frame/support/procedural/src/benchmark.rs @@ -249,7 +249,7 @@ impl BenchmarkCallDef { } } -/// Represents a parsed `#[benchmark]` or `#[instance_banchmark]` item. +/// Represents a parsed `#[benchmark]` or `#[instance_benchmark]` item. #[derive(Clone)] struct BenchmarkDef { params: Vec, @@ -466,7 +466,7 @@ pub fn benchmarks( let mod_vis = module.vis; let mod_name = module.ident; - // consume #[benchmarks] attribute by exclusing it from mod_attrs + // consume #[benchmarks] attribute by excluding it from mod_attrs let mod_attrs: Vec<&Attribute> = module .attrs .iter() diff --git a/substrate/frame/support/procedural/src/construct_runtime/mod.rs b/substrate/frame/support/procedural/src/construct_runtime/mod.rs index 1937dfa9ca4..b083abbb2a8 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/mod.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/mod.rs @@ -24,7 +24,7 @@ //! - Implicitly: `System: frame_system` //! - Explicitly: `System: frame_system::{Pallet, Call}` //! -//! The `construct_runtime` transitions from the implicit definition to the explict one. +//! The `construct_runtime` transitions from the implicit definition to the explicit one. //! From the explicit state, Substrate expands the pallets with additional information //! that is to be included in the runtime metadata. This expansion makes visible some extra //! parts of the pallets, mainly the `Error` if defined. The expanded state looks like @@ -55,7 +55,7 @@ //! +----------+ +------------------+ //! ``` //! -//! When all pallet parts are implcit, then the `construct_runtime!` macro expands to its final +//! When all pallet parts are implicit, then the `construct_runtime!` macro expands to its final //! state, the `ExplicitExpanded`. Otherwise, all implicit parts are converted to an explicit //! expanded part allow the `construct_runtime!` to expand any remaining explicit parts to an //! explicit expanded part. @@ -202,7 +202,7 @@ //! 
Similarly to the previous transition, the macro expansion transforms `System: //! frame_system::{Pallet, Call}` into `System: frame_system expanded::{Error} ::{Pallet, Call}`. //! The `expanded` section adds extra parts that the Substrate would like to expose for each pallet -//! by default. This is done to expose the approprite types for metadata construction. +//! by default. This is done to expose the appropriate types for metadata construction. //! //! This time, instead of calling `tt_default_parts` we are using the `tt_extra_parts` macro. //! This macro returns the ` :: expanded { Error }` list of additional parts we would like to diff --git a/substrate/frame/support/procedural/src/construct_runtime/parse.rs b/substrate/frame/support/procedural/src/construct_runtime/parse.rs index 88f3f14dc86..31866c787b0 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/parse.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/parse.rs @@ -322,7 +322,7 @@ impl Parse for PalletDeclaration { /// A struct representing a path to a pallet. `PalletPath` is almost identical to the standard /// Rust path with a few restrictions: /// - No leading colons allowed -/// - Path segments can only consist of identifers separated by colons +/// - Path segments can only consist of identifiers separated by colons #[derive(Debug, Clone)] pub struct PalletPath { pub inner: Path, @@ -595,7 +595,7 @@ pub struct Pallet { pub is_expanded: bool, /// The name of the pallet, e.g.`System` in `System: frame_system`. pub name: Ident, - /// Either automatically infered, or defined (e.g. `MyPallet ... = 3,`). + /// Either automatically inferred, or defined (e.g. `MyPallet ... = 3,`). pub index: u8, /// The path of the pallet, e.g. `frame_system` in `System: frame_system`. pub path: PalletPath, @@ -634,7 +634,7 @@ impl Pallet { /// +----------+ +----------+ +------------------+ /// ``` enum PalletsConversion { - /// Pallets implicitely declare parts. + /// Pallets implicitly declare parts. /// /// `System: frame_system`. Implicit(Vec), @@ -648,7 +648,7 @@ enum PalletsConversion { /// Pallets explicitly declare parts that are fully expanded. /// /// This is the end state that contains extra parts included by - /// default by Subtrate. + /// default by Substrate. /// /// `System: frame_system expanded::{Error} ::{Pallet, Call}` /// @@ -660,7 +660,7 @@ enum PalletsConversion { /// /// Check if all pallet have explicit declaration of their parts, if so then assign index to each /// pallet using same rules as rust for fieldless enum. I.e. implicit are assigned number -/// incrementedly from last explicit or 0. +/// incrementally from last explicit or 0. 
fn convert_pallets(pallets: Vec) -> syn::Result { if pallets.iter().any(|pallet| pallet.pallet_parts.is_none()) { return Ok(PalletsConversion::Implicit(pallets)) diff --git a/substrate/frame/support/procedural/src/derive_impl.rs b/substrate/frame/support/procedural/src/derive_impl.rs index 8740ccd401a..54755f1163a 100644 --- a/substrate/frame/support/procedural/src/derive_impl.rs +++ b/substrate/frame/support/procedural/src/derive_impl.rs @@ -279,7 +279,7 @@ fn test_runtime_type_with_doc() { } #[test] -fn test_disambugation_path() { +fn test_disambiguation_path() { let foreign_impl: ItemImpl = parse_quote!(impl SomeTrait for SomeType {}); let default_impl_path: Path = parse_quote!(SomeScope::SomeType); diff --git a/substrate/frame/support/procedural/src/dynamic_params.rs b/substrate/frame/support/procedural/src/dynamic_params.rs index b718ccbc955..29399a885bc 100644 --- a/substrate/frame/support/procedural/src/dynamic_params.rs +++ b/substrate/frame/support/procedural/src/dynamic_params.rs @@ -147,8 +147,8 @@ fn ensure_codec_index(attrs: &Vec, span: Span) -> Result<()> { /// Used to inject arguments into the inner `#[dynamic_pallet_params(..)]` attribute. /// -/// This allows the outer `#[dynamic_params(..)]` attribute to specify some arguments that dont need -/// to be repeated every time. +/// This allows the outer `#[dynamic_params(..)]` attribute to specify some arguments that don't +/// need to be repeated every time. struct MacroInjectArgs { runtime_params: syn::Ident, params_pallet: syn::Type, @@ -311,7 +311,7 @@ impl ToTokens for DynamicPalletParamAttr { )* } - impl #scrate::traits::dynamic_params::AggregratedKeyValue for Parameters { + impl #scrate::traits::dynamic_params::AggregatedKeyValue for Parameters { type Key = #key_ident; type Value = #value_ident; @@ -497,7 +497,7 @@ impl ToTokens for DynamicParamAggregatedEnum { #vis enum #params_key_ident { #( #(#attributes)* - #param_names(<#param_types as #scrate::traits::dynamic_params::AggregratedKeyValue>::Key), + #param_names(<#param_types as #scrate::traits::dynamic_params::AggregatedKeyValue>::Key), )* } @@ -515,11 +515,11 @@ impl ToTokens for DynamicParamAggregatedEnum { #vis enum #params_value_ident { #( #(#attributes)* - #param_names(<#param_types as #scrate::traits::dynamic_params::AggregratedKeyValue>::Value), + #param_names(<#param_types as #scrate::traits::dynamic_params::AggregatedKeyValue>::Value), )* } - impl #scrate::traits::dynamic_params::AggregratedKeyValue for #name { + impl #scrate::traits::dynamic_params::AggregatedKeyValue for #name { type Key = #params_key_ident; type Value = #params_value_ident; @@ -536,13 +536,13 @@ impl ToTokens for DynamicParamAggregatedEnum { } #( - impl ::core::convert::From<<#param_types as #scrate::traits::dynamic_params::AggregratedKeyValue>::Key> for #params_key_ident { - fn from(key: <#param_types as #scrate::traits::dynamic_params::AggregratedKeyValue>::Key) -> Self { + impl ::core::convert::From<<#param_types as #scrate::traits::dynamic_params::AggregatedKeyValue>::Key> for #params_key_ident { + fn from(key: <#param_types as #scrate::traits::dynamic_params::AggregatedKeyValue>::Key) -> Self { #params_key_ident::#param_names(key) } } - impl ::core::convert::TryFrom<#params_value_ident> for <#param_types as #scrate::traits::dynamic_params::AggregratedKeyValue>::Value { + impl ::core::convert::TryFrom<#params_value_ident> for <#param_types as #scrate::traits::dynamic_params::AggregatedKeyValue>::Value { type Error = (); fn try_from(value: #params_value_ident) -> Result { diff --git 
a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs index dee6d522d25..bc62c0509b0 100644 --- a/substrate/frame/support/procedural/src/lib.rs +++ b/substrate/frame/support/procedural/src/lib.rs @@ -137,7 +137,7 @@ fn counter_prefix(prefix: &str) -> String { /// - `Call` - If the pallet has callable functions /// - `Storage` - If the pallet uses storage /// - `Event` or `Event` (if the event is generic) - If the pallet emits events -/// - `Origin` or `Origin` (if the origin is generic) - If the pallet has instanciable origins +/// - `Origin` or `Origin` (if the origin is generic) - If the pallet has instantiable origins /// - `Config` or `Config` (if the config is generic) - If the pallet builds the genesis /// storage with `GenesisConfig` /// - `Inherent` - If the pallet provides/can check inherents. @@ -166,7 +166,7 @@ fn counter_prefix(prefix: &str) -> String { /// and `Event` are encoded, and to define the ModuleToIndex value. /// /// if `= $n` is not given, then index is resolved in the same way as fieldless enum in Rust -/// (i.e. incrementedly from previous index): +/// (i.e. incrementally from previous index): /// ```nocompile /// pallet1 .. = 2, /// pallet2 .., // Here pallet2 is given index 3 @@ -460,7 +460,7 @@ pub fn storage_alias(attributes: TokenStream, input: TokenStream) -> TokenStream } /// This attribute can be used to derive a full implementation of a trait based on a local partial -/// impl and an external impl containing defaults that can be overriden in the local impl. +/// impl and an external impl containing defaults that can be overridden in the local impl. /// /// For a full end-to-end example, see [below](#use-case-auto-derive-test-pallet-config-traits). /// diff --git a/substrate/frame/support/procedural/src/pallet/parse/mod.rs b/substrate/frame/support/procedural/src/pallet/parse/mod.rs index b55f130a93a..6e12774611d 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/mod.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/mod.rs @@ -321,7 +321,7 @@ impl Def { Ok(()) } - /// Tries to locate a manual tasks impl (an impl impling a trait whose last path segment is + /// Tries to locate a manual tasks impl (an impl implementing a trait whose last path segment is /// `Task`) in the event that one has not been found already via the attribute macro pub fn resolve_manual_tasks_impl( tasks: &mut Option, diff --git a/substrate/frame/support/src/dispatch_context.rs b/substrate/frame/support/src/dispatch_context.rs index 608187b7220..254302c8f14 100644 --- a/substrate/frame/support/src/dispatch_context.rs +++ b/substrate/frame/support/src/dispatch_context.rs @@ -18,7 +18,7 @@ //! Provides functions to interact with the dispatch context. //! //! A Dispatch context is created by calling [`run_in_context`] and then the given closure will be -//! executed in this dispatch context. Everyting run in this `closure` will have access to the same +//! executed in this dispatch context. Everything run in this `closure` will have access to the same //! dispatch context. This also applies to nested calls of [`run_in_context`]. The dispatch context //! can be used to store and retrieve information locally in this context. The dispatch context can //! be accessed by using [`with_context`]. This function will execute the given closure and give it @@ -51,7 +51,7 @@ //! //! run_in_context(|| { //! with_context::(|v| { -//! // Intitialize the value to the default value. +//! // Initialize the value to the default value. //! 
assert_eq!(0, v.or_default().0); //! v.or_default().0 = 10; //! }); diff --git a/substrate/frame/support/src/instances.rs b/substrate/frame/support/src/instances.rs index 396018d5cbd..ecb356af50b 100644 --- a/substrate/frame/support/src/instances.rs +++ b/substrate/frame/support/src/instances.rs @@ -31,83 +31,83 @@ //! NOTE: [`frame_support::pallet`] will reexport them inside the module, in order to make them //! accessible to [`frame_support::construct_runtime`]. -/// `Instance1` to be used for instantiable palllets defined with the +/// `Instance1` to be used for instantiable pallets defined with the /// [`#[pallet]`](`frame_support::pallet`) macro. Instances 2-16 are also available but are hidden /// from docs. #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance1; -/// `Instance2` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance2` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance2; -/// `Instance3` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance3` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance3; -/// `Instance4` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance4` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance4; -/// `Instance5` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance5` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance5; -/// `Instance6` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance6` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance6; -/// `Instance7` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance7` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance7; -/// `Instance8` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance8` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance8; -/// `Instance9` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance9` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance9; -/// `Instance10` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance10` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance10; -/// `Instance11` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance11` to be used for instantiable pallets defined with the `#[pallet]` macro. 
#[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance11; -/// `Instance12` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance12` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance12; -/// `Instance13` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance13` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance13; -/// `Instance14` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance14` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance14; -/// `Instance15` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance15` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance15; -/// `Instance16` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance16` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance16; diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index c524f8953a4..895215d364e 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -145,7 +145,7 @@ impl TypeId for PalletId { /// # Examples /// /// There are different ways to declare the `prefix` to use. The `prefix` type can either be -/// declared explicetly by passing it to the macro as an attribute or by letting the macro +/// declared explicitly by passing it to the macro as an attribute or by letting the macro /// guess on what the `prefix` type is. The `prefix` is always passed as the first generic /// argument to the type declaration. When using [`#[pallet::storage]`](pallet_macros::storage) /// this first generic argument is always `_`. Besides declaring the `prefix`, the rest of the @@ -1110,7 +1110,7 @@ pub mod pallet_macros { /// Declares a storage as unbounded in potential size. /// - /// When implementating the storage info (when `#[pallet::generate_storage_info]` is + /// When implementing the storage info (when `#[pallet::generate_storage_info]` is /// specified on the pallet struct placeholder), the size of the storage will be declared /// as unbounded. This can be useful for storage which can never go into PoV (Proof of /// Validity). @@ -2340,7 +2340,7 @@ pub mod pallet_macros { /// Allows defining conditions for a task to run. /// - /// This attribute is attached to a function inside an `impl` block annoated with + /// This attribute is attached to a function inside an `impl` block annotated with /// [`pallet::tasks_experimental`](`tasks_experimental`) to define the conditions for a /// given work item to be valid. /// @@ -2351,7 +2351,7 @@ pub mod pallet_macros { /// Allows defining an index for a task. 
/// - /// This attribute is attached to a function inside an `impl` block annoated with + /// This attribute is attached to a function inside an `impl` block annotated with /// [`pallet::tasks_experimental`](`tasks_experimental`) to define the index of a given /// work item. /// @@ -2361,7 +2361,7 @@ pub mod pallet_macros { /// Allows defining an iterator over available work items for a task. /// - /// This attribute is attached to a function inside an `impl` block annoated with + /// This attribute is attached to a function inside an `impl` block annotated with /// [`pallet::tasks_experimental`](`tasks_experimental`). /// /// It takes an iterator as input that yields a tuple with same types as the function @@ -2370,7 +2370,7 @@ pub mod pallet_macros { /// Allows defining the weight of a task. /// - /// This attribute is attached to a function inside an `impl` block annoated with + /// This attribute is attached to a function inside an `impl` block annotated with /// [`pallet::tasks_experimental`](`tasks_experimental`) define the weight of a given work /// item. /// diff --git a/substrate/frame/support/src/storage/migration.rs b/substrate/frame/support/src/storage/migration.rs index 568c475bdc6..252625cf4f7 100644 --- a/substrate/frame/support/src/storage/migration.rs +++ b/substrate/frame/support/src/storage/migration.rs @@ -303,11 +303,11 @@ pub fn take_storage_item /// Move a storage from a pallet prefix to another pallet prefix. /// /// Keys used in pallet storages always start with: -/// `concat(twox_128(pallet_name), towx_128(storage_name))`. +/// `concat(twox_128(pallet_name), twox_128(storage_name))`. /// /// This function will remove all value for which the key start with -/// `concat(twox_128(old_pallet_name), towx_128(storage_name))` and insert them at the key with -/// the start replaced by `concat(twox_128(new_pallet_name), towx_128(storage_name))`. +/// `concat(twox_128(old_pallet_name), twox_128(storage_name))` and insert them at the key with +/// the start replaced by `concat(twox_128(new_pallet_name), twox_128(storage_name))`. /// /// # Example /// @@ -339,7 +339,7 @@ pub fn move_storage_from_pallet( /// Move all storages from a pallet prefix to another pallet prefix. /// /// Keys used in pallet storages always start with: -/// `concat(twox_128(pallet_name), towx_128(storage_name))`. +/// `concat(twox_128(pallet_name), twox_128(storage_name))`. /// /// This function will remove all value for which the key start with `twox_128(old_pallet_name)` /// and insert them at the key with the start replaced by `twox_128(new_pallet_name)`. diff --git a/substrate/frame/support/src/storage/mod.rs b/substrate/frame/support/src/storage/mod.rs index 8ebe7b31da8..f7d7447482d 100644 --- a/substrate/frame/support/src/storage/mod.rs +++ b/substrate/frame/support/src/storage/mod.rs @@ -159,7 +159,7 @@ pub trait StorageValue { /// /// # Warning /// - /// `None` does not mean that `get()` does not return a value. The default value is completly + /// `None` does not mean that `get()` does not return a value. The default value is completely /// ignored by this function. fn decode_len() -> Option where @@ -363,7 +363,7 @@ pub trait StorageMap { /// /// # Warning /// - /// `None` does not mean that `get()` does not return a value. The default value is completly + /// `None` does not mean that `get()` does not return a value. The default value is completely /// ignored by this function. 
fn decode_len>(key: KeyArg) -> Option where @@ -381,7 +381,8 @@ pub trait StorageMap { /// /// # Warning /// - /// - `None` does not mean that `get()` does not return a value. The default value is completly + /// - `None` does not mean that `get()` does not return a value. The default value is + /// completely /// ignored by this function. /// /// - The value returned is the non-deduplicated length of the underlying Vector in storage.This @@ -410,7 +411,7 @@ pub trait StorageMap { pub trait IterableStorageMap: StorageMap { /// The type that iterates over all `(key, value)`. type Iterator: Iterator; - /// The type that itereates over all `key`s. + /// The type that iterates over all `key`s. type KeyIterator: Iterator; /// Enumerate all elements in the map in lexicographical order of the encoded key. If you @@ -777,7 +778,7 @@ pub trait StorageDoubleMap { /// /// # Warning /// - /// `None` does not mean that `get()` does not return a value. The default value is completly + /// `None` does not mean that `get()` does not return a value. The default value is completely /// ignored by this function. fn decode_len(key1: KArg1, key2: KArg2) -> Option where @@ -798,7 +799,7 @@ pub trait StorageDoubleMap { /// /// # Warning /// - /// `None` does not mean that `get()` does not return a value. The default value is completly + /// `None` does not mean that `get()` does not return a value. The default value is completely /// ignored by this function. fn decode_non_dedup_len(key1: KArg1, key2: KArg2) -> Option where @@ -980,7 +981,7 @@ pub trait StorageNMap { /// /// # Warning /// - /// `None` does not mean that `get()` does not return a value. The default value is completly + /// `None` does not mean that `get()` does not return a value. The default value is completely /// ignored by this function. fn decode_len + TupleToEncodedIter>(key: KArg) -> Option where @@ -1488,8 +1489,8 @@ pub trait StorageDecodeLength: private::Sealed + codec::DecodeLength { } } -/// It is expected that the length is at the beginning of the encoded objectand that the length is a -/// `Compact`. +/// It is expected that the length is at the beginning of the encoded object and that the length is +/// a `Compact`. /// /// # Note /// The length returned by this trait is not deduplicated, i.e. it is the length of the underlying @@ -1790,7 +1791,7 @@ mod test { }); } - // This test ensures that the Digest encoding does not change without being noticied. + // This test ensures that the Digest encoding does not change without being noticed. #[test] fn digest_storage_append_works_as_expected() { TestExternalities::default().execute_with(|| { diff --git a/substrate/frame/support/src/storage/stream_iter.rs b/substrate/frame/support/src/storage/stream_iter.rs index 2205601938b..529b2f387c7 100644 --- a/substrate/frame/support/src/storage/stream_iter.rs +++ b/substrate/frame/support/src/storage/stream_iter.rs @@ -217,7 +217,7 @@ const STORAGE_INPUT_BUFFER_CAPACITY: usize = 2 * 1024; /// Implementation of [`codec::Input`] using [`sp_io::storage::read`]. /// /// Keeps an internal buffer with a size of [`STORAGE_INPUT_BUFFER_CAPACITY`]. All read accesses -/// are tried to be served by this buffer. If the buffer doesn't hold enough bytes to fullfill the +/// are tried to be served by this buffer. If the buffer doesn't hold enough bytes to fulfill the /// current read access, the buffer is re-filled from the state. 
A read request that is bigger than /// the internal buffer is directly forwarded to the state to reduce the number of reads from the /// state. diff --git a/substrate/frame/support/src/storage/types/counted_map.rs b/substrate/frame/support/src/storage/types/counted_map.rs index 04e69751c16..0444e269928 100644 --- a/substrate/frame/support/src/storage/types/counted_map.rs +++ b/substrate/frame/support/src/storage/types/counted_map.rs @@ -310,7 +310,7 @@ where /// /// # Warning /// - /// `None` does not mean that `get()` does not return a value. The default value is completly + /// `None` does not mean that `get()` does not return a value. The default value is completely /// ignored by this function. pub fn decode_len>(key: KeyArg) -> Option where @@ -775,13 +775,13 @@ mod test { assert_eq!(A::try_get(1), Err(())); assert_eq!(A::count(), 3); - // Take exsisting. + // Take existing. assert_eq!(A::take(4), 10); assert_eq!(A::try_get(4), Err(())); assert_eq!(A::count(), 2); - // Take non-exsisting. + // Take non-existing. assert_eq!(A::take(4), ADefault::get()); assert_eq!(A::try_get(4), Err(())); @@ -1022,13 +1022,13 @@ mod test { assert_eq!(B::try_get(1), Err(())); assert_eq!(B::count(), 3); - // Take exsisting. + // Take existing. assert_eq!(B::take(4), Some(10)); assert_eq!(B::try_get(4), Err(())); assert_eq!(B::count(), 2); - // Take non-exsisting. + // Take non-existing. assert_eq!(B::take(4), None); assert_eq!(B::try_get(4), Err(())); diff --git a/substrate/frame/support/src/storage/types/counted_nmap.rs b/substrate/frame/support/src/storage/types/counted_nmap.rs index 279894ee973..51cde93f28c 100644 --- a/substrate/frame/support/src/storage/types/counted_nmap.rs +++ b/substrate/frame/support/src/storage/types/counted_nmap.rs @@ -378,7 +378,7 @@ where /// /// # Warning /// - /// `None` does not mean that `get()` does not return a value. The default value is completly + /// `None` does not mean that `get()` does not return a value. The default value is completely /// ignored by this function. pub fn decode_len + TupleToEncodedIter>( key: KArg, diff --git a/substrate/frame/support/src/storage/types/double_map.rs b/substrate/frame/support/src/storage/types/double_map.rs index cb9479d491c..2a7af7a9846 100644 --- a/substrate/frame/support/src/storage/types/double_map.rs +++ b/substrate/frame/support/src/storage/types/double_map.rs @@ -445,7 +445,7 @@ where /// /// # Warning /// - /// `None` does not mean that `get()` does not return a value. The default value is completly + /// `None` does not mean that `get()` does not return a value. The default value is completely /// ignored by this function. pub fn decode_len(key1: KArg1, key2: KArg2) -> Option where @@ -465,7 +465,8 @@ where /// /// # Warning /// - /// - `None` does not mean that `get()` does not return a value. The default value is completly + /// - `None` does not mean that `get()` does not return a value. The default value is + /// completely /// ignored by this function. /// /// - The value returned is the non-deduplicated length of the underlying Vector in storage.This diff --git a/substrate/frame/support/src/storage/types/map.rs b/substrate/frame/support/src/storage/types/map.rs index ee5db74583b..b79a6ae9b84 100644 --- a/substrate/frame/support/src/storage/types/map.rs +++ b/substrate/frame/support/src/storage/types/map.rs @@ -277,7 +277,7 @@ where /// /// # Warning /// - /// `None` does not mean that `get()` does not return a value. The default value is completly + /// `None` does not mean that `get()` does not return a value. 
The default value is completely /// ignored by this function. pub fn decode_len>(key: KeyArg) -> Option where @@ -295,7 +295,8 @@ where /// /// # Warning /// - /// - `None` does not mean that `get()` does not return a value. The default value is completly + /// - `None` does not mean that `get()` does not return a value. The default value is + /// completely /// ignored by this function. /// /// - The value returned is the non-deduplicated length of the underlying Vector in storage.This diff --git a/substrate/frame/support/src/storage/types/mod.rs b/substrate/frame/support/src/storage/types/mod.rs index 9dd6f4066e4..631410f425d 100644 --- a/substrate/frame/support/src/storage/types/mod.rs +++ b/substrate/frame/support/src/storage/types/mod.rs @@ -195,7 +195,7 @@ mod test { // result query returns error assert_eq!(C::get(), Err(())); - // value query with custom onempty returns 42 + // value query with custom on empty returns 42 assert_eq!(D::get(), 42); }); } diff --git a/substrate/frame/support/src/storage/types/nmap.rs b/substrate/frame/support/src/storage/types/nmap.rs index 0723db68900..253f02a14f0 100755 --- a/substrate/frame/support/src/storage/types/nmap.rs +++ b/substrate/frame/support/src/storage/types/nmap.rs @@ -348,7 +348,7 @@ where /// /// # Warning /// - /// `None` does not mean that `get()` does not return a value. The default value is completly + /// `None` does not mean that `get()` does not return a value. The default value is completely /// ignored by this function. pub fn decode_len + TupleToEncodedIter>( key: KArg, diff --git a/substrate/frame/support/src/storage/types/value.rs b/substrate/frame/support/src/storage/types/value.rs index 263091dd252..a2d93a6a165 100644 --- a/substrate/frame/support/src/storage/types/value.rs +++ b/substrate/frame/support/src/storage/types/value.rs @@ -225,7 +225,7 @@ where /// /// # Warning /// - /// `None` does not mean that `get()` does not return a value. The default value is completly + /// `None` does not mean that `get()` does not return a value. The default value is completely /// ignored by this function. pub fn decode_len() -> Option where @@ -243,7 +243,8 @@ where /// /// # Warning /// - /// - `None` does not mean that `get()` does not return a value. The default value is completly + /// - `None` does not mean that `get()` does not return a value. The default value is + /// completely /// ignored by this function. /// /// - The value returned is the non-deduplicated length of the underlying Vector in storage.This diff --git a/substrate/frame/support/src/traits/dispatch.rs b/substrate/frame/support/src/traits/dispatch.rs index de50ce7a26c..7dc8d3e4f5a 100644 --- a/substrate/frame/support/src/traits/dispatch.rs +++ b/substrate/frame/support/src/traits/dispatch.rs @@ -540,7 +540,7 @@ pub trait OriginTrait: Sized { }) } - /// Extract a reference to the sytsem origin, if that's what the caller is. + /// Extract a reference to the system origin, if that's what the caller is. fn as_system_ref(&self) -> Option<&RawOrigin> { self.caller().as_system_ref() } diff --git a/substrate/frame/support/src/traits/dynamic_params.rs b/substrate/frame/support/src/traits/dynamic_params.rs index 8881df04141..32dae6799ea 100644 --- a/substrate/frame/support/src/traits/dynamic_params.rs +++ b/substrate/frame/support/src/traits/dynamic_params.rs @@ -25,30 +25,30 @@ use frame_support::Parameter; /// A dynamic parameter store across an aggregated KV type. 
pub trait RuntimeParameterStore { - type AggregratedKeyValue: AggregratedKeyValue; + type AggregatedKeyValue: AggregatedKeyValue; /// Get the value of a parametrized key. /// /// Should return `None` if no explicit value was set instead of a default. fn get(key: K) -> Option where - KV: AggregratedKeyValue, - K: Key + Into<::Key>, - ::Key: IntoKey< - <::AggregratedKeyValue as AggregratedKeyValue>::Key, + KV: AggregatedKeyValue, + K: Key + Into<::Key>, + ::Key: IntoKey< + <::AggregatedKeyValue as AggregatedKeyValue>::Key, >, - <::AggregratedKeyValue as AggregratedKeyValue>::Value: - TryIntoKey<::Value>, - ::Value: TryInto; + <::AggregatedKeyValue as AggregatedKeyValue>::Value: + TryIntoKey<::Value>, + ::Value: TryInto; } /// A dynamic parameter store across a concrete KV type. -pub trait ParameterStore { +pub trait ParameterStore { /// Get the value of a parametrized key. fn get(key: K) -> Option where - K: Key + Into<::Key>, - ::Value: TryInto; + K: Key + Into<::Key>, + ::Value: TryInto; } /// Key of a dynamic parameter. @@ -61,7 +61,7 @@ pub trait Key { } /// The aggregated key-value type of a dynamic parameter store. -pub trait AggregratedKeyValue: Parameter { +pub trait AggregatedKeyValue: Parameter { /// The aggregated key type. type Key: Parameter + MaxEncodedLen; @@ -72,7 +72,7 @@ pub trait AggregratedKeyValue: Parameter { fn into_parts(self) -> (Self::Key, Option); } -impl AggregratedKeyValue for () { +impl AggregatedKeyValue for () { type Key = (); type Value = (); @@ -90,17 +90,17 @@ pub struct ParameterStoreAdapter(sp_std::marker::PhantomData<(PS, KV)>); impl ParameterStore for ParameterStoreAdapter where PS: RuntimeParameterStore, - KV: AggregratedKeyValue, - ::Key: - IntoKey<<::AggregratedKeyValue as AggregratedKeyValue>::Key>, - ::Value: TryFromKey< - <::AggregratedKeyValue as AggregratedKeyValue>::Value, + KV: AggregatedKeyValue, + ::Key: + IntoKey<<::AggregatedKeyValue as AggregatedKeyValue>::Key>, + ::Value: TryFromKey< + <::AggregatedKeyValue as AggregatedKeyValue>::Value, >, { fn get(key: K) -> Option where - K: Key + Into<::Key>, - ::Value: TryInto, + K: Key + Into<::Key>, + ::Value: TryInto, { PS::get::(key) } diff --git a/substrate/frame/support/src/traits/hooks.rs b/substrate/frame/support/src/traits/hooks.rs index 7d0e5aa1e89..d83e2704745 100644 --- a/substrate/frame/support/src/traits/hooks.rs +++ b/substrate/frame/support/src/traits/hooks.rs @@ -167,7 +167,7 @@ pub trait OnGenesis { /// /// This hook is intended to be used internally in FRAME and not be exposed to FRAME developers. /// -/// It is defined as a seperate trait from [`OnRuntimeUpgrade`] precisely to not pollute the public +/// It is defined as a separate trait from [`OnRuntimeUpgrade`] precisely to not pollute the public /// API. pub trait BeforeAllRuntimeMigrations { /// Something that should happen before runtime migrations are executed. diff --git a/substrate/frame/support/src/traits/misc.rs b/substrate/frame/support/src/traits/misc.rs index 1f634a64282..bc7407a7be6 100644 --- a/substrate/frame/support/src/traits/misc.rs +++ b/substrate/frame/support/src/traits/misc.rs @@ -1424,7 +1424,7 @@ mod test { assert_eq!(>::max_encoded_len(), 2usize.pow(14) + 4); let data = 4u64; - // Ensure that we check that the `Vec` is consumed completly on decode. + // Ensure that we check that the `Vec` is consumed completely on decode. 
assert!(WrapperOpaque::::decode(&mut &data.encode().encode()[..]).is_err()); } diff --git a/substrate/frame/support/src/traits/tokens/fungible/conformance_tests/regular/unbalanced.rs b/substrate/frame/support/src/traits/tokens/fungible/conformance_tests/regular/unbalanced.rs index e7fcc15472e..a5220736898 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/conformance_tests/regular/unbalanced.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/conformance_tests/regular/unbalanced.rs @@ -94,7 +94,7 @@ where ); assert_eq!(T::balance(&account_0), account_0_initial_balance - amount); - // Decreasing the balance below funds avalibale should fail when Precision::Exact + // Decreasing the balance below funds available should fail when Precision::Exact let balance_before = T::balance(&account_0); assert_eq!( T::decrease_balance( diff --git a/substrate/frame/support/src/traits/tokens/imbalance/split_two_ways.rs b/substrate/frame/support/src/traits/tokens/imbalance/split_two_ways.rs index c1afac35fc9..59a582389ba 100644 --- a/substrate/frame/support/src/traits/tokens/imbalance/split_two_ways.rs +++ b/substrate/frame/support/src/traits/tokens/imbalance/split_two_ways.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Means for splitting an imbalance into two and hanlding them differently. +//! Means for splitting an imbalance into two and handling them differently. use super::super::imbalance::{Imbalance, OnUnbalanced}; use sp_runtime::traits::Saturating; diff --git a/substrate/frame/support/src/traits/tokens/misc.rs b/substrate/frame/support/src/traits/tokens/misc.rs index fd497bc4eda..a4dd5e49142 100644 --- a/substrate/frame/support/src/traits/tokens/misc.rs +++ b/substrate/frame/support/src/traits/tokens/misc.rs @@ -130,7 +130,7 @@ impl WithdrawConsequence { pub enum DepositConsequence { /// Deposit couldn't happen due to the amount being too low. This is usually because the /// account doesn't yet exist and the deposit wouldn't bring it to at least the minimum needed - /// for existance. + /// for existence. BelowMinimum, /// Deposit cannot happen since the account cannot be created (usually because it's a consumer /// and there exists no provider reference). 
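As a side note on the `DepositConsequence` docs touched above: a minimal, self-contained sketch of how a deposit check can map to `BelowMinimum` (hypothetical free function and plain balances, not the actual `fungible` trait API):

    #[derive(Debug, PartialEq)]
    enum DepositConsequence {
        Success,
        BelowMinimum,
    }

    // A deposit that would leave a new account below the minimum needed for
    // existence is rejected with `BelowMinimum`.
    fn can_deposit(existing: u128, amount: u128, minimum_balance: u128) -> DepositConsequence {
        if existing.saturating_add(amount) < minimum_balance {
            DepositConsequence::BelowMinimum
        } else {
            DepositConsequence::Success
        }
    }

    fn main() {
        assert_eq!(can_deposit(0, 1, 10), DepositConsequence::BelowMinimum);
        assert_eq!(can_deposit(0, 10, 10), DepositConsequence::Success);
    }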
diff --git a/substrate/frame/support/test/tests/derive_impl_ui/attached_to_non_impl.rs b/substrate/frame/support/test/tests/derive_impl_ui/attached_to_non_impl.rs index 332e1e78730..3f3148a03a9 100644 --- a/substrate/frame/support/test/tests/derive_impl_ui/attached_to_non_impl.rs +++ b/substrate/frame/support/test/tests/derive_impl_ui/attached_to_non_impl.rs @@ -50,7 +50,7 @@ impl Animal for FourLeggedAnimal { } } -pub struct AcquaticMammal {} +pub struct AquaticMammal {} #[derive_impl(FourLeggedAnimal as Animal)] struct Something {} diff --git a/substrate/frame/support/test/tests/derive_impl_ui/bad_default_impl_path.rs b/substrate/frame/support/test/tests/derive_impl_ui/bad_default_impl_path.rs index bc118acdf1e..7093c32400b 100644 --- a/substrate/frame/support/test/tests/derive_impl_ui/bad_default_impl_path.rs +++ b/substrate/frame/support/test/tests/derive_impl_ui/bad_default_impl_path.rs @@ -50,14 +50,14 @@ impl Animal for FourLeggedAnimal { } } -pub struct AcquaticMammal {} +pub struct AquaticMammal {} // Should throw: `error: cannot find macro `__export_tokens_tt_tiger` in this scope` // // Note that there is really no better way to clean up this error, tt_call suffers from the // same downside but this is really the only rough edge when using macro magic. #[derive_impl(Tiger as Animal)] -impl Animal for AcquaticMammal { +impl Animal for AquaticMammal { type Locomotion = (Swims, RunsOnFourLegs); type Environment = (Land, Sea); } diff --git a/substrate/frame/support/test/tests/derive_impl_ui/bad_disambiguation_path.rs b/substrate/frame/support/test/tests/derive_impl_ui/bad_disambiguation_path.rs index 9535ac3deda..8e0253e45c1 100644 --- a/substrate/frame/support/test/tests/derive_impl_ui/bad_disambiguation_path.rs +++ b/substrate/frame/support/test/tests/derive_impl_ui/bad_disambiguation_path.rs @@ -50,10 +50,10 @@ impl Animal for FourLeggedAnimal { } } -pub struct AcquaticMammal {} +pub struct AquaticMammal {} #[derive_impl(FourLeggedAnimal as Insect)] -impl Animal for AcquaticMammal { +impl Animal for AquaticMammal { type Locomotion = (Swims, RunsOnFourLegs); type Environment = (Land, Sea); } diff --git a/substrate/frame/support/test/tests/derive_impl_ui/missing_disambiguation_path.rs b/substrate/frame/support/test/tests/derive_impl_ui/missing_disambiguation_path.rs index f26c28313e5..4914ceea0b6 100644 --- a/substrate/frame/support/test/tests/derive_impl_ui/missing_disambiguation_path.rs +++ b/substrate/frame/support/test/tests/derive_impl_ui/missing_disambiguation_path.rs @@ -50,10 +50,10 @@ impl Animal for FourLeggedAnimal { } } -pub struct AcquaticMammal {} +pub struct AquaticMammal {} #[derive_impl(FourLeggedAnimal as)] -impl Animal for AcquaticMammal { +impl Animal for AquaticMammal { type Locomotion = (Swims, RunsOnFourLegs); type Environment = (Land, Sea); } diff --git a/substrate/frame/support/test/tests/derive_impl_ui/pass/basic_overriding.rs b/substrate/frame/support/test/tests/derive_impl_ui/pass/basic_overriding.rs index 37c0742f195..20744b8cba2 100644 --- a/substrate/frame/support/test/tests/derive_impl_ui/pass/basic_overriding.rs +++ b/substrate/frame/support/test/tests/derive_impl_ui/pass/basic_overriding.rs @@ -51,19 +51,19 @@ impl Animal for FourLeggedAnimal { } } -pub struct AcquaticMammal {} +pub struct AquaticMammal {} // without omitting the `as X` #[derive_impl(FourLeggedAnimal as Animal)] -impl Animal for AcquaticMammal { +impl Animal for AquaticMammal { type Locomotion = (Swims, RunsOnFourLegs); type Environment = (Land, Sea); } 
-assert_type_eq_all!(<AcquaticMammal as Animal>::Locomotion, (Swims, RunsOnFourLegs)); -assert_type_eq_all!(<AcquaticMammal as Animal>::Environment, (Land, Sea)); -assert_type_eq_all!(<AcquaticMammal as Animal>::Diet, Omnivore); -assert_type_eq_all!(<AcquaticMammal as Animal>::SleepingStrategy, Diurnal); +assert_type_eq_all!(<AquaticMammal as Animal>::Locomotion, (Swims, RunsOnFourLegs)); +assert_type_eq_all!(<AquaticMammal as Animal>::Environment, (Land, Sea)); +assert_type_eq_all!(<AquaticMammal as Animal>::Diet, Omnivore); +assert_type_eq_all!(<AquaticMammal as Animal>::SleepingStrategy, Diurnal); pub struct Lion {} diff --git a/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs b/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs index 8e8079b183c..6c71b544426 100644 --- a/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs +++ b/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs @@ -82,7 +82,7 @@ fn module_error_outer_enum_expand_explicit() { // Check that all error types are propagated match RuntimeError::Example(pallet::Error::InsufficientProposersBalance) { - // Error passed implicitely to the pallet system. + // Error passed implicitly to the pallet system. RuntimeError::System(system) => match system { frame_system::Error::InvalidSpecName => (), frame_system::Error::SpecVersionNeedsToIncrease => (), diff --git a/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs b/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs index 08b0b919e21..79828119742 100644 --- a/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs +++ b/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs @@ -62,11 +62,11 @@ frame_support::construct_runtime!( // Exclude part `Storage` in order not to check its metadata in tests. System: frame_system exclude_parts { Storage }, - // Pallet exposes `Error` implicitely. + // Pallet exposes `Error` implicitly. Example: common::outer_enums::pallet, Instance1Example: common::outer_enums::pallet::<Instance1>, - // Pallet exposes `Error` implicitely. + // Pallet exposes `Error` implicitly. Example2: common::outer_enums::pallet2, Instance1Example2: common::outer_enums::pallet2::<Instance1>, @@ -82,7 +82,7 @@ fn module_error_outer_enum_expand_implicit() { // Check that all error types are propagated match RuntimeError::Example(pallet::Error::InsufficientProposersBalance) { - // Error passed implicitely to the pallet system. + // Error passed implicitly to the pallet system. RuntimeError::System(system) => match system { frame_system::Error::InvalidSpecName => (), frame_system::Error::SpecVersionNeedsToIncrease => (), diff --git a/substrate/frame/support/test/tests/pallet_ui/pass/inherited_call_weight3.rs b/substrate/frame/support/test/tests/pallet_ui/pass/inherited_call_weight3.rs index f40d1040858..ddccd0b3e19 100644 --- a/substrate/frame/support/test/tests/pallet_ui/pass/inherited_call_weight3.rs +++ b/substrate/frame/support/test/tests/pallet_ui/pass/inherited_call_weight3.rs @@ -18,7 +18,7 @@ use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; -// If, for whatever reason, you dont to not use a `WeightInfo` trait - it will still work. +// If, for whatever reason, you don't want to use a `WeightInfo` trait - it will still work. struct Impl; impl Impl { diff --git a/substrate/frame/system/src/limits.rs b/substrate/frame/system/src/limits.rs index 5fd7a5af875..ab5a98a6b97 100644 --- a/substrate/frame/system/src/limits.rs +++ b/substrate/frame/system/src/limits.rs @@ -378,7 +378,7 @@ impl BlockWeightsBuilder { /// class, based on the allowance.
/// /// This is to make sure that extrinsics don't stay forever in the pool, - /// because they could seamingly fit the block (since they are below `max_block`), + /// because they could seemingly fit the block (since they are below `max_block`), /// but the cost of calling `on_initialize` always prevents them from being included. pub fn avg_block_initialization(mut self, init_cost: Perbill) -> Self { self.init_cost = Some(init_cost); diff --git a/substrate/frame/system/src/offchain.rs b/substrate/frame/system/src/offchain.rs index a019cfd666e..a64b3261964 100644 --- a/substrate/frame/system/src/offchain.rs +++ b/substrate/frame/system/src/offchain.rs @@ -22,7 +22,7 @@ //! This module provides transaction related helpers to: //! - Submit a raw unsigned transaction //! - Submit an unsigned transaction with a signed payload -//! - Submit a signed transction. +//! - Submit a signed transaction. //! //! ## Usage //! @@ -384,7 +384,7 @@ where /// /// // runtime-specific public key /// type Public = MultiSigner: From; -/// type Signature = MulitSignature: From; +/// type Signature = MultiSignature: From; /// ``` // TODO [#5662] Potentially use `IsWrappedBy` types, or find some other way to make it easy to // obtain unwrapped crypto (and wrap it back). @@ -444,7 +444,7 @@ pub trait SigningTypes: crate::Config { /// A public key that is capable of identifying `AccountId`s. /// /// Usually that's either a raw crypto public key (e.g. `sr25519::Public`) or - /// an aggregate type for multiple crypto public keys, like `MulitSigner`. + /// an aggregate type for multiple crypto public keys, like `MultiSigner`. type Public: Clone + PartialEq + IdentifyAccount diff --git a/substrate/frame/tips/src/migrations/mod.rs b/substrate/frame/tips/src/migrations/mod.rs index 9cdd01c17fb..a7917bfce16 100644 --- a/substrate/frame/tips/src/migrations/mod.rs +++ b/substrate/frame/tips/src/migrations/mod.rs @@ -17,7 +17,7 @@ /// Version 4. /// -/// For backward compatability reasons, pallet-tips uses `Treasury` for storage module prefix +/// For backward compatibility reasons, pallet-tips uses `Treasury` for storage module prefix /// before calling this migration. After calling this migration, it will get replaced with /// own storage identifier. pub mod v4; diff --git a/substrate/frame/transaction-payment/src/tests.rs b/substrate/frame/transaction-payment/src/tests.rs index d3a1721ccb9..bc0efd2d64a 100644 --- a/substrate/frame/transaction-payment/src/tests.rs +++ b/substrate/frame/transaction-payment/src/tests.rs @@ -384,7 +384,7 @@ fn query_call_info_and_fee_details_works() { adjusted_weight_fee: info .weight .min(BlockWeights::get().max_block) - .ref_time() as u64 * 2 * 3 / 2 /* weight * weight_fee * multipler */ + .ref_time() as u64 * 2 * 3 / 2 /* weight * weight_fee * multiplier */ }), tip: 0, }, diff --git a/substrate/frame/transaction-storage/README.md b/substrate/frame/transaction-storage/README.md index 1066968469d..b173c0a84d5 100644 --- a/substrate/frame/transaction-storage/README.md +++ b/substrate/frame/transaction-storage/README.md @@ -79,7 +79,7 @@ ipfs block get /ipfs/ > kitten.jpeg ``` To renew data and prevent it from being disposed after the storage period, use `transactionStorage.renew(block, index)` -where `block` is the block number of the previous store or renew transction, and index is the index of that transaction +where `block` is the block number of the previous store or renew transaction, and index is the index of that transaction in the block. 
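For the renewal flow described in the transaction-storage README hunk above, the `(block, index)` pair simply addresses the earlier store/renew transaction. A rough sketch of that addressing under assumed, simplified types (a `HashMap` stands in for the pallet's storage; the error string is illustrative):

    use std::collections::HashMap;

    type BlockNumber = u32;
    type TxIndex = u32;

    // Re-registers previously stored data under the renewing transaction,
    // effectively restarting its storage period.
    fn renew(
        store: &mut HashMap<(BlockNumber, TxIndex), Vec<u8>>,
        block: BlockNumber,
        index: TxIndex,
        renewing_block: BlockNumber,
        renewing_index: TxIndex,
    ) -> Result<(), &'static str> {
        let data = store.get(&(block, index)).cloned().ok_or("renewed transaction not found")?;
        store.insert((renewing_block, renewing_index), data);
        Ok(())
    }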
diff --git a/substrate/frame/transaction-storage/src/lib.rs b/substrate/frame/transaction-storage/src/lib.rs index d32cfe169ce..398cb350c50 100644 --- a/substrate/frame/transaction-storage/src/lib.rs +++ b/substrate/frame/transaction-storage/src/lib.rs @@ -137,7 +137,7 @@ pub mod pallet { InvalidProof, /// Missing storage proof. MissingProof, - /// Unable to verify proof becasue state data is missing. + /// Unable to verify proof because state data is missing. MissingStateData, /// Double proof check in the block. DoubleCheck, diff --git a/substrate/frame/transaction-storage/src/tests.rs b/substrate/frame/transaction-storage/src/tests.rs index e17b3ca3beb..621f74804ec 100644 --- a/substrate/frame/transaction-storage/src/tests.rs +++ b/substrate/frame/transaction-storage/src/tests.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Tests for transction-storage pallet. +//! Tests for transaction-storage pallet. use super::{Pallet as TransactionStorage, *}; use crate::mock::*; @@ -53,8 +53,8 @@ fn discards_data() { }; run_to_block(11, proof_provider); assert!(Transactions::::get(1).is_some()); - let transctions = Transactions::::get(1).unwrap(); - assert_eq!(transctions.len(), 2); + let transactions = Transactions::::get(1).unwrap(); + assert_eq!(transactions.len(), 2); assert_eq!(ChunkCount::::get(1), 16); run_to_block(12, proof_provider); assert!(Transactions::::get(1).is_none()); diff --git a/substrate/frame/treasury/src/lib.rs b/substrate/frame/treasury/src/lib.rs index 5e429d3914b..d569ae406ea 100644 --- a/substrate/frame/treasury/src/lib.rs +++ b/substrate/frame/treasury/src/lib.rs @@ -621,7 +621,7 @@ pub mod pallet { with_context::>, _>(|v| { let context = v.or_default(); - // We group based on `max_amount`, to dinstinguish between different kind of + // We group based on `max_amount`, to distinguish between different kind of // origins. (assumes that all origins have different `max_amount`) // // Worst case is that we reject some "valid" request. @@ -1042,7 +1042,7 @@ impl, I: 'static> Pallet { /// ### Invariants of proposal storage items /// /// 1. [`ProposalCount`] >= Number of elements in [`Proposals`]. - /// 2. Each entry in [`Proposals`] should be saved under a key stricly less than current + /// 2. Each entry in [`Proposals`] should be saved under a key strictly less than current /// [`ProposalCount`]. /// 3. Each [`ProposalIndex`] contained in [`Approvals`] should exist in [`Proposals`]. /// Note, that this automatically implies [`Approvals`].count() <= [`Proposals`].count(). @@ -1078,7 +1078,7 @@ impl, I: 'static> Pallet { /// ## Invariants of spend storage items /// /// 1. [`SpendCount`] >= Number of elements in [`Spends`]. - /// 2. Each entry in [`Spends`] should be saved under a key stricly less than current + /// 2. Each entry in [`Spends`] should be saved under a key strictly less than current /// [`SpendCount`]. /// 3. For each spend entry contained in [`Spends`] we should have spend.expire_at /// > spend.valid_from. 
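The treasury invariants whose wording is fixed above ("strictly less than") read naturally as a try-state style check. An illustrative sketch with plain slices standing in for the storage items (a simplification, not the pallet's actual `try_state` code):

    fn check_proposal_invariants(
        proposal_count: u32,
        proposal_keys: &[u32],
        approvals: &[u32],
    ) -> Result<(), &'static str> {
        // 1. `ProposalCount` >= number of elements in `Proposals`.
        if (proposal_keys.len() as u32) > proposal_count {
            return Err("ProposalCount must be >= number of proposals");
        }
        // 2. Each entry in `Proposals` is keyed strictly below `ProposalCount`.
        if proposal_keys.iter().any(|k| *k >= proposal_count) {
            return Err("proposal keys must be strictly less than ProposalCount");
        }
        // 3. Every `ProposalIndex` in `Approvals` exists in `Proposals`.
        if approvals.iter().any(|a| !proposal_keys.contains(a)) {
            return Err("approved proposals must exist in Proposals");
        }
        Ok(())
    }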
diff --git a/substrate/frame/uniques/src/tests.rs b/substrate/frame/uniques/src/tests.rs index 351dac09f7f..afd0352bf90 100644 --- a/substrate/frame/uniques/src/tests.rs +++ b/substrate/frame/uniques/src/tests.rs @@ -289,7 +289,7 @@ fn transfer_owner_should_work() { assert_eq!(Balances::reserved_balance(&2), 0); assert_eq!(Balances::reserved_balance(&3), 45); - // 2's acceptence from before is reset when it became owner, so it cannot be transfered + // 2's acceptance from before is reset when it became owner, so it cannot be transferred // without a fresh acceptance. assert_noop!( Uniques::transfer_ownership(RuntimeOrigin::signed(3), 0, 2), @@ -692,7 +692,7 @@ fn approved_account_gets_reset_after_transfer() { assert_ok!(Uniques::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3)); assert_ok!(Uniques::transfer(RuntimeOrigin::signed(2), 0, 42, 5)); - // this shouldn't work because we have just transfered the item to another account. + // this shouldn't work because we have just transferred the item to another account. assert_noop!( Uniques::transfer(RuntimeOrigin::signed(3), 0, 42, 4), Error::::NoPermission diff --git a/substrate/frame/utility/README.md b/substrate/frame/utility/README.md index 00fff76cd62..0a6769ae1c7 100644 --- a/substrate/frame/utility/README.md +++ b/substrate/frame/utility/README.md @@ -17,7 +17,7 @@ This module contains two basic pieces of functionality: need multiple distinct accounts (e.g. as controllers for many staking accounts), but where it's perfectly fine to have each of them controlled by the same underlying keypair. Derivative accounts are, for the purposes of proxy filtering considered exactly the same as - the oigin and are thus hampered with the origin's filters. + the origin and are thus hampered with the origin's filters. Since proxy filters are respected in all dispatches of this module, it should never need to be filtered by any proxy. diff --git a/substrate/frame/vesting/src/migrations.rs b/substrate/frame/vesting/src/migrations.rs index cac3c90b403..6fe82312b63 100644 --- a/substrate/frame/vesting/src/migrations.rs +++ b/substrate/frame/vesting/src/migrations.rs @@ -29,7 +29,7 @@ pub mod v1 { log::debug!( target: "runtime::vesting", - "migration: Vesting storage version v1 PRE migration checks succesful!" + "migration: Vesting storage version v1 PRE migration checks successful!" ); Ok(()) diff --git a/substrate/primitives/arithmetic/fuzzer/src/fixed_point.rs b/substrate/primitives/arithmetic/fuzzer/src/fixed_point.rs index e76dd1503e3..e2d31065635 100644 --- a/substrate/primitives/arithmetic/fuzzer/src/fixed_point.rs +++ b/substrate/primitives/arithmetic/fuzzer/src/fixed_point.rs @@ -66,7 +66,7 @@ fn main() { let c = FixedI64::saturating_from_integer(x.saturating_add(y)); assert_eq!(a.saturating_add(b), c); - // Check substraction. + // Check subtraction. let a = FixedI64::saturating_from_integer(x); let b = FixedI64::saturating_from_integer(y); let c = FixedI64::saturating_from_integer(x.saturating_sub(y)); diff --git a/substrate/primitives/arithmetic/src/fixed_point.rs b/substrate/primitives/arithmetic/src/fixed_point.rs index 736a900bde2..c4e9259c5fc 100644 --- a/substrate/primitives/arithmetic/src/fixed_point.rs +++ b/substrate/primitives/arithmetic/src/fixed_point.rs @@ -568,7 +568,7 @@ macro_rules! 
implement_fixed { let v = self.0 as u128; // Want x' = sqrt(x) where x = n/D and x' = n'/D (D is fixed) - // Our prefered way is: + // Our preferred way is: // sqrt(n/D) = sqrt(nD / D^2) = sqrt(nD)/sqrt(D^2) = sqrt(nD)/D // ergo n' = sqrt(nD) // but this requires nD to fit into our type. diff --git a/substrate/primitives/blockchain/src/backend.rs b/substrate/primitives/blockchain/src/backend.rs index 8208f9128e7..7a09865f858 100644 --- a/substrate/primitives/blockchain/src/backend.rs +++ b/substrate/primitives/blockchain/src/backend.rs @@ -187,7 +187,7 @@ pub trait Backend: /// a block with the given `base_hash`. /// /// The search space is always limited to blocks which are in the finalized - /// chain or descendents of it. + /// chain or descendants of it. /// /// Returns `Ok(None)` if `base_hash` is not found in search space. // TODO: document time complexity of this, see [#1444](https://github.com/paritytech/substrate/issues/1444) diff --git a/substrate/primitives/blockchain/src/error.rs b/substrate/primitives/blockchain/src/error.rs index 74a2ed3fba5..e8ac148d751 100644 --- a/substrate/primitives/blockchain/src/error.rs +++ b/substrate/primitives/blockchain/src/error.rs @@ -34,7 +34,7 @@ pub enum ApplyExtrinsicFailed { /// The transaction cannot be included into the current block. /// /// This doesn't necessary mean that the transaction itself is invalid, but it might be just - /// unappliable onto the current block. + /// unapplicable onto the current block. #[error("Extrinsic is not valid: {0:?}")] Validity(#[from] TransactionValidityError), diff --git a/substrate/primitives/blockchain/src/header_metadata.rs b/substrate/primitives/blockchain/src/header_metadata.rs index 08b3c9ab3df..ccd640c0567 100644 --- a/substrate/primitives/blockchain/src/header_metadata.rs +++ b/substrate/primitives/blockchain/src/header_metadata.rs @@ -178,7 +178,7 @@ pub struct TreeRoute { impl TreeRoute { /// Creates a new `TreeRoute`. /// - /// To preserve the structure safety invariats it is required that `pivot < route.len()`. + /// To preserve the structure safety invariants it is required that `pivot < route.len()`. pub fn new(route: Vec>, pivot: usize) -> Result { if pivot < route.len() { Ok(TreeRoute { route, pivot }) @@ -212,7 +212,7 @@ impl TreeRoute { ) } - /// Get a slice of enacted blocks (descendents of the common ancestor) + /// Get a slice of enacted blocks (descendants of the common ancestor) pub fn enacted(&self) -> &[HashAndNumber] { &self.route[self.pivot + 1..] } diff --git a/substrate/primitives/consensus/babe/src/lib.rs b/substrate/primitives/consensus/babe/src/lib.rs index 6eb75b270a0..ee07da6829f 100644 --- a/substrate/primitives/consensus/babe/src/lib.rs +++ b/substrate/primitives/consensus/babe/src/lib.rs @@ -266,7 +266,7 @@ impl Default for BabeEpochConfiguration { } /// Verifies the equivocation proof by making sure that: both headers have -/// different hashes, are targetting the same slot, and have valid signatures by +/// different hashes, are targeting the same slot, and have valid signatures by /// the same authority. pub fn check_equivocation_proof(proof: EquivocationProof) -> bool where @@ -298,7 +298,7 @@ where let first_pre_digest = find_pre_digest(&proof.first_header)?; let second_pre_digest = find_pre_digest(&proof.second_header)?; - // both headers must be targetting the same slot and it must + // both headers must be targeting the same slot and it must // be the same as the one in the proof. 
if proof.slot != first_pre_digest.slot() || first_pre_digest.slot() != second_pre_digest.slot() diff --git a/substrate/primitives/consensus/beefy/src/lib.rs b/substrate/primitives/consensus/beefy/src/lib.rs index b3f62ead736..70978ca559d 100644 --- a/substrate/primitives/consensus/beefy/src/lib.rs +++ b/substrate/primitives/consensus/beefy/src/lib.rs @@ -200,7 +200,7 @@ pub mod ecdsa_bls_crypto { fn verify(&self, signature: &::Signature, msg: &[u8]) -> bool { // We can not simply call // `EcdsaBlsPair::verify(signature.as_inner_ref(), msg, self.as_inner_ref())` - // because that invokes ECDSA default verification which perfoms Blake2b hash + // because that invokes ECDSA default verification which performs Blake2b hash // which we don't want. This is because ECDSA signatures are meant to be verified // on Ethereum network where Keccak hasher is significantly cheaper than Blake2b. // See Figure 3 of [OnSc21](https://www.scitepress.org/Papers/2021/106066/106066.pdf) diff --git a/substrate/primitives/consensus/beefy/src/mmr.rs b/substrate/primitives/consensus/beefy/src/mmr.rs index 74851ece7ac..0bc303d51c0 100644 --- a/substrate/primitives/consensus/beefy/src/mmr.rs +++ b/substrate/primitives/consensus/beefy/src/mmr.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! BEEFY + MMR utilties. +//! BEEFY + MMR utilities. //! //! While BEEFY can be used completely independently as an additional consensus gadget, //! it is designed around a main use case of bridging standalone networks together. @@ -77,7 +77,7 @@ pub struct MmrLeaf { /// /// Given that adding new struct elements in SCALE is backward compatible (i.e. old format can be /// still decoded, the new fields will simply be ignored). We expect the major version to be bumped -/// very rarely (hopefuly never). +/// very rarely (hopefully never). #[derive(Debug, Default, PartialEq, Eq, Clone, Encode, Decode, TypeInfo)] pub struct MmrLeafVersion(u8); impl MmrLeafVersion { diff --git a/substrate/primitives/consensus/beefy/src/payload.rs b/substrate/primitives/consensus/beefy/src/payload.rs index dff017b49e0..1a06e620e7a 100644 --- a/substrate/primitives/consensus/beefy/src/payload.rs +++ b/substrate/primitives/consensus/beefy/src/payload.rs @@ -43,7 +43,7 @@ pub mod known_payloads { pub struct Payload(Vec<(BeefyPayloadId, Vec)>); impl Payload { - /// Construct a new payload given an initial vallue + /// Construct a new payload given an initial value pub fn from_single_entry(id: BeefyPayloadId, value: Vec) -> Self { Self(vec![(id, value)]) } diff --git a/substrate/primitives/consensus/common/src/lib.rs b/substrate/primitives/consensus/common/src/lib.rs index 6505d005deb..01d3b7a24f9 100644 --- a/substrate/primitives/consensus/common/src/lib.rs +++ b/substrate/primitives/consensus/common/src/lib.rs @@ -182,7 +182,7 @@ pub trait Proposer { + Send + Unpin + 'static; - /// The supported proof recording by the implementator of this trait. See [`ProofRecording`] + /// The supported proof recording by the implementor of this trait. See [`ProofRecording`] /// for more information. type ProofRecording: self::ProofRecording + Send + Sync + 'static; /// The proof type used by [`Self::ProofRecording`]. 
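The BABE equivocation comments corrected above reduce to three conditions on the two headers. A condensed sketch with placeholder types (the real `check_equivocation_proof` additionally extracts the pre-digests and verifies both signatures against the offending authority):

    type Hash = [u8; 32];
    type Slot = u64;

    // Two distinct blocks produced for the same slot, matching the slot
    // claimed by the proof, constitute an equivocation.
    fn is_equivocation(
        first_hash: Hash,
        second_hash: Hash,
        first_slot: Slot,
        second_slot: Slot,
        proof_slot: Slot,
    ) -> bool {
        first_hash != second_hash && first_slot == second_slot && first_slot == proof_slot
    }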
diff --git a/substrate/primitives/consensus/sassafras/src/ticket.rs b/substrate/primitives/consensus/sassafras/src/ticket.rs index dc0a61990d3..345de99be28 100644 --- a/substrate/primitives/consensus/sassafras/src/ticket.rs +++ b/substrate/primitives/consensus/sassafras/src/ticket.rs @@ -115,7 +115,7 @@ mod tests { let threshold = ticket_id_threshold(redundancy, slots, attempts, validators); let threshold = threshold as f64 / TicketId::MAX as f64; - // We expect that the total number of tickets allowed to be submited + // We expect that the total number of tickets allowed to be submitted // is slots*redundancy let avt = ((attempts * validators) as f64 * threshold) as u32; assert_eq!(avt, slots * redundancy); diff --git a/substrate/primitives/consensus/sassafras/src/vrf.rs b/substrate/primitives/consensus/sassafras/src/vrf.rs index 815edb5eb66..537cff52ab6 100644 --- a/substrate/primitives/consensus/sassafras/src/vrf.rs +++ b/substrate/primitives/consensus/sassafras/src/vrf.rs @@ -101,7 +101,7 @@ pub fn make_ticket_id(input: &VrfInput, pre_output: &VrfPreOutput) -> TicketId { u128::from_le_bytes(bytes) } -/// Make revealed key seed from a given VRF input and pre-ouput. +/// Make revealed key seed from a given VRF input and pre-output. /// /// Input should have been obtained via [`revealed_key_input`]. /// Pre-output should have been obtained from the input directly using the vrf diff --git a/substrate/primitives/core/src/address_uri.rs b/substrate/primitives/core/src/address_uri.rs index 2e32d0cd86d..d44f3c0c87c 100644 --- a/substrate/primitives/core/src/address_uri.rs +++ b/substrate/primitives/core/src/address_uri.rs @@ -85,7 +85,7 @@ impl Error { /// Complementary error information. /// -/// Strucutre contains complementary information about parsing address URI string. +/// Structure contains complementary information about parsing address URI string. /// String contains a copy of an original URI string, 0-based integer indicates position of invalid /// character. #[derive(Debug, PartialEq, Eq, Clone)] diff --git a/substrate/primitives/core/src/bandersnatch.rs b/substrate/primitives/core/src/bandersnatch.rs index 42c8b293634..71ee2da5383 100644 --- a/substrate/primitives/core/src/bandersnatch.rs +++ b/substrate/primitives/core/src/bandersnatch.rs @@ -249,7 +249,7 @@ pub mod vrf { /// /// The `transcript` summarizes a set of messages which are defining a particular /// protocol by automating the Fiat-Shamir transform for challenge generation. - /// A good explaination of the topic can be found in Merlin [docs](https://merlin.cool/) + /// A good explanation of the topic can be found in Merlin [docs](https://merlin.cool/) /// /// The `inputs` is a sequence of [`VrfInput`]s which, during the signing procedure, are /// first transformed to [`VrfPreOutput`]s. Both inputs and pre-outputs are then appended to diff --git a/substrate/primitives/core/src/paired_crypto.rs b/substrate/primitives/core/src/paired_crypto.rs index 3901846b375..260e86b6ff9 100644 --- a/substrate/primitives/core/src/paired_crypto.rs +++ b/substrate/primitives/core/src/paired_crypto.rs @@ -73,7 +73,7 @@ pub mod ecdsa_bls377 { #[cfg(feature = "full_crypto")] impl Pair { - /// Hashes the `message` with the specified [`Hasher`] before signing sith the ECDSA secret + /// Hashes the `message` with the specified [`Hasher`] before signing with the ECDSA secret /// component. /// /// The hasher does not affect the BLS12-377 component. 
This generates BLS12-377 Signature diff --git a/substrate/primitives/inherents/src/client_side.rs b/substrate/primitives/inherents/src/client_side.rs index 27479de136f..3c299dfa4ee 100644 --- a/substrate/primitives/inherents/src/client_side.rs +++ b/substrate/primitives/inherents/src/client_side.rs @@ -23,7 +23,7 @@ use sp_runtime::traits::Block as BlockT; /// It is possible for the caller to provide custom arguments to the callee by setting the /// `ExtraArgs` generic parameter. /// -/// The crate already provides some convience implementations of this trait for +/// The crate already provides some convenience implementations of this trait for /// `Box` and closures. So, it should not be required to implement /// this trait manually. #[async_trait::async_trait] diff --git a/substrate/primitives/io/Cargo.toml b/substrate/primitives/io/Cargo.toml index e47775d56d9..dddea4ffa23 100644 --- a/substrate/primitives/io/Cargo.toml +++ b/substrate/primitives/io/Cargo.toml @@ -85,7 +85,7 @@ disable_allocator = [] # This gives the caller direct programmatic access to the error message. # # When disabled the error message will only be printed out in the -# logs, with the caller receving a generic "wasm `unreachable` instruction executed" +# logs, with the caller receiving a generic "wasm `unreachable` instruction executed" # error message. # # This has no effect if both `disable_panic_handler` and `disable_oom` diff --git a/substrate/primitives/io/src/lib.rs b/substrate/primitives/io/src/lib.rs index 684854ea5c8..ec32b729033 100644 --- a/substrate/primitives/io/src/lib.rs +++ b/substrate/primitives/io/src/lib.rs @@ -1081,7 +1081,7 @@ pub trait Crypto { /// Register a `ecdsa` signature for batch verification. /// /// Batch verification must be enabled by calling [`start_batch_verify`]. - /// If batch verification is not enabled, the signature will be verified immediatley. + /// If batch verification is not enabled, the signature will be verified immediately. /// To get the result of the batch verification, [`finish_batch_verify`] /// needs to be called. /// @@ -1696,9 +1696,9 @@ mod tracing_setup { /// The PassingTracingSubscriber implements `tracing_core::Subscriber` /// and pushes the information across the runtime interface to the host - struct PassingTracingSubsciber; + struct PassingTracingSubscriber; - impl tracing_core::Subscriber for PassingTracingSubsciber { + impl tracing_core::Subscriber for PassingTracingSubscriber { fn enabled(&self, metadata: &Metadata<'_>) -> bool { wasm_tracing::enabled(Crossing(metadata.into())) } @@ -1731,7 +1731,7 @@ mod tracing_setup { /// set the global bridging subscriber once.
pub fn init_tracing() { if TRACING_SET.load(Ordering::Relaxed) == false { - set_global_default(Dispatch::new(PassingTracingSubsciber {})) + set_global_default(Dispatch::new(PassingTracingSubscriber {})) .expect("We only ever call this once"); TRACING_SET.store(true, Ordering::Relaxed); } diff --git a/substrate/primitives/maybe-compressed-blob/Cargo.toml b/substrate/primitives/maybe-compressed-blob/Cargo.toml index fa5383d03b1..178c915ce83 100644 --- a/substrate/primitives/maybe-compressed-blob/Cargo.toml +++ b/substrate/primitives/maybe-compressed-blob/Cargo.toml @@ -6,7 +6,7 @@ edition.workspace = true license = "Apache-2.0" homepage = "https://substrate.io" repository.workspace = true -description = "Handling of blobs, usually Wasm code, which may be compresed" +description = "Handling of blobs, usually Wasm code, which may be compressed" documentation = "https://docs.rs/sp-maybe-compressed-blob" readme = "README.md" diff --git a/substrate/primitives/metadata-ir/src/types.rs b/substrate/primitives/metadata-ir/src/types.rs index b107d20a8e2..b05f26ff55d 100644 --- a/substrate/primitives/metadata-ir/src/types.rs +++ b/substrate/primitives/metadata-ir/src/types.rs @@ -160,7 +160,7 @@ pub struct ExtrinsicMetadataIR { pub ty: T::Type, /// Extrinsic version. pub version: u8, - /// The type of the address that signes the extrinsic + /// The type of the address that signs the extrinsic pub address_ty: T::Type, /// The type of the outermost Call enum. pub call_ty: T::Type, diff --git a/substrate/primitives/npos-elections/src/pjr.rs b/substrate/primitives/npos-elections/src/pjr.rs index 08e40a014ea..6e3775199a2 100644 --- a/substrate/primitives/npos-elections/src/pjr.rs +++ b/substrate/primitives/npos-elections/src/pjr.rs @@ -261,7 +261,7 @@ fn prepare_pjr_input( } } - // Convert Suppports into a SupportMap + // Convert Supports into a SupportMap // // As a flat list, we're limited to linear search. That gives the production of `candidates`, // below, a complexity of `O(s*c)`, where `s == supports.len()` and `c == all_candidates.len()`. diff --git a/substrate/primitives/npos-elections/src/reduce.rs b/substrate/primitives/npos-elections/src/reduce.rs index c9ed493daf3..3fd291f88ab 100644 --- a/substrate/primitives/npos-elections/src/reduce.rs +++ b/substrate/primitives/npos-elections/src/reduce.rs @@ -393,7 +393,7 @@ fn reduce_all(assignments: &mut Vec>) -> u32 // voter_root_path.last().unwrap()); TODO: @kian // the common path must be non-void.. debug_assert!(common_count > 0); - // and smaller than btoh + // and smaller than both debug_assert!(common_count <= voter_root_path.len()); debug_assert!(common_count <= target_root_path.len()); diff --git a/substrate/primitives/runtime-interface/src/lib.rs b/substrate/primitives/runtime-interface/src/lib.rs index 8b0edf1ec81..f6ef27789b3 100644 --- a/substrate/primitives/runtime-interface/src/lib.rs +++ b/substrate/primitives/runtime-interface/src/lib.rs @@ -283,7 +283,7 @@ pub use sp_std; /// /// `key` holds the pointer and the length to the `data` slice. /// pub fn call(data: &[u8]) -> Vec { /// extern "C" { pub fn ext_call_version_2(key: u64); } -/// // Should call into extenal `ext_call_version_2(<[u8] as IntoFFIValue>::into_ffi_value(key))` +/// // Should call into external `ext_call_version_2(<[u8] as IntoFFIValue>::into_ffi_value(key))` /// // But this is too much to replicate in a doc test so here we just return a dummy vector. /// // Note that we jump into the latest version not marked as `register_only` (i.e. version 2). 
/// Vec::new() diff --git a/substrate/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs b/substrate/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs index 2f42e60504e..871a4922ce3 100644 --- a/substrate/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs +++ b/substrate/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs @@ -45,19 +45,19 @@ pub fn import_sp_io() { #[runtime_interface] pub trait TestApi { - fn test_versionning(&self, _data: u32) -> bool { + fn test_versioning(&self, _data: u32) -> bool { // should not be called unimplemented!() } } wasm_export_functions! { - fn test_versionning_works() { + fn test_versioning_works() { // old api allows only 42 and 50 - assert!(test_api::test_versionning(42)); - assert!(test_api::test_versionning(50)); + assert!(test_api::test_versioning(42)); + assert!(test_api::test_versioning(50)); - assert!(!test_api::test_versionning(142)); - assert!(!test_api::test_versionning(0)); + assert!(!test_api::test_versioning(142)); + assert!(!test_api::test_versioning(0)); } } diff --git a/substrate/primitives/runtime-interface/test-wasm/src/lib.rs b/substrate/primitives/runtime-interface/test-wasm/src/lib.rs index cf1ff3bca08..2b3fc728f6f 100644 --- a/substrate/primitives/runtime-interface/test-wasm/src/lib.rs +++ b/substrate/primitives/runtime-interface/test-wasm/src/lib.rs @@ -126,21 +126,21 @@ pub trait TestApi { val } - fn test_versionning(&self, data: u32) -> bool { + fn test_versioning(&self, data: u32) -> bool { data == 42 || data == 50 } #[version(2)] - fn test_versionning(&self, data: u32) -> bool { + fn test_versioning(&self, data: u32) -> bool { data == 42 } - fn test_versionning_register_only(&self, data: u32) -> bool { + fn test_versioning_register_only(&self, data: u32) -> bool { data == 80 } #[version(2, register_only)] - fn test_versionning_register_only(&self, data: u32) -> bool { + fn test_versioning_register_only(&self, data: u32) -> bool { data == 42 } @@ -282,21 +282,21 @@ wasm_export_functions! { assert_eq!(0, len); } - fn test_versionning_works() { + fn test_versioning_works() { // we fix new api to accept only 42 as a proper input // as opposed to sp-runtime-interface-test-wasm-deprecated::test_api::verify_input // which accepted 42 and 50. - assert!(test_api::test_versionning(42)); + assert!(test_api::test_versioning(42)); - assert!(!test_api::test_versionning(50)); - assert!(!test_api::test_versionning(102)); + assert!(!test_api::test_versioning(50)); + assert!(!test_api::test_versioning(102)); } - fn test_versionning_register_only_works() { + fn test_versioning_register_only_works() { // Ensure that we will import the version of the runtime interface function that // isn't tagged with `register_only`. - assert!(!test_api::test_versionning_register_only(42)); - assert!(test_api::test_versionning_register_only(80)); + assert!(!test_api::test_versioning_register_only(42)); + assert!(test_api::test_versioning_register_only(80)); } fn test_return_input_as_tuple() { diff --git a/substrate/primitives/runtime-interface/test/src/lib.rs b/substrate/primitives/runtime-interface/test/src/lib.rs index 215704a1121..05a955fbe3f 100644 --- a/substrate/primitives/runtime-interface/test/src/lib.rs +++ b/substrate/primitives/runtime-interface/test/src/lib.rs @@ -163,18 +163,18 @@ fn test_array_return_value_memory_is_freed() { } #[test] -fn test_versionining_with_new_host_works() { +fn test_versioning_with_new_host_works() { // We call to the new wasm binary with new host function. 
- call_wasm_method::(wasm_binary_unwrap(), "test_versionning_works"); + call_wasm_method::(wasm_binary_unwrap(), "test_versioning_works"); // we call to the old wasm binary with a new host functions // old versions of host functions should be called and test should be ok! - call_wasm_method::(wasm_binary_deprecated_unwrap(), "test_versionning_works"); + call_wasm_method::(wasm_binary_deprecated_unwrap(), "test_versioning_works"); } #[test] -fn test_versionining_register_only() { - call_wasm_method::(wasm_binary_unwrap(), "test_versionning_register_only_works"); +fn test_versioning_register_only() { + call_wasm_method::(wasm_binary_unwrap(), "test_versioning_register_only_works"); } fn run_test_in_another_process( diff --git a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs index 5b54caf597b..8f6c0c6f650 100644 --- a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs +++ b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs @@ -40,7 +40,7 @@ use sp_std::{fmt, prelude::*}; /// the decoding fails. const EXTRINSIC_FORMAT_VERSION: u8 = 4; -/// The `SingaturePayload` of `UncheckedExtrinsic`. +/// The `SignaturePayload` of `UncheckedExtrinsic`. type UncheckedSignaturePayload = (Address, Signature, Extra); /// An extrinsic right from the external world. This is unchecked and so can contain a signature. diff --git a/substrate/primitives/runtime/src/offchain/storage_lock.rs b/substrate/primitives/runtime/src/offchain/storage_lock.rs index 56d0eeae527..cfdaa954fe5 100644 --- a/substrate/primitives/runtime/src/offchain/storage_lock.rs +++ b/substrate/primitives/runtime/src/offchain/storage_lock.rs @@ -556,7 +556,7 @@ mod tests { let res = lock.try_lock(); assert_eq!(res.is_ok(), false); - // sleep again untill sleep_until > deadline + // sleep again until sleep_until > deadline offchain::sleep_until(offchain::timestamp().add(Duration::from_millis(200))); // the lock has expired, failed to extend it diff --git a/substrate/primitives/runtime/src/testing.rs b/substrate/primitives/runtime/src/testing.rs index 5f94c834a8f..b4aeda5a0e7 100644 --- a/substrate/primitives/runtime/src/testing.rs +++ b/substrate/primitives/runtime/src/testing.rs @@ -284,9 +284,9 @@ where } /// The signature payload of a `TestXt`. -type TxSingaturePayload = (u64, Extra); +type TxSignaturePayload = (u64, Extra); -impl SignaturePayload for TxSingaturePayload { +impl SignaturePayload for TxSignaturePayload { type SignatureAddress = u64; type Signature = (); type SignatureExtra = Extra; @@ -299,7 +299,7 @@ impl SignaturePayload for TxSingaturePayload { #[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo)] pub struct TestXt { /// Signature of the extrinsic. - pub signature: Option>, + pub signature: Option>, /// Call of the extrinsic. pub call: Call, } @@ -348,7 +348,7 @@ impl traits::Extrinsic for TestXt { type Call = Call; - type SignaturePayload = TxSingaturePayload; + type SignaturePayload = TxSignaturePayload; fn is_signed(&self) -> Option { Some(self.signature.is_some()) diff --git a/substrate/primitives/runtime/src/traits.rs b/substrate/primitives/runtime/src/traits.rs index 9ee41339d50..5a6c306dd2a 100644 --- a/substrate/primitives/runtime/src/traits.rs +++ b/substrate/primitives/runtime/src/traits.rs @@ -330,7 +330,7 @@ impl> Morph
for MorphInto { } } -/// Implementation of `TryMorph` which attmepts to convert between types using `TryInto`. +/// Implementation of `TryMorph` which attempts to convert between types using `TryInto`. pub struct TryMorphInto(sp_std::marker::PhantomData); impl> TryMorph for TryMorphInto { type Outcome = T; @@ -1449,7 +1449,7 @@ pub trait Dispatchable { /// to represent the dispatch class and weight. type Info; /// Additional information that is returned by `dispatch`. Can be used to supply the caller - /// with information about a `Dispatchable` that is ownly known post dispatch. + /// with information about a `Dispatchable` that is only known post dispatch. type PostInfo: Eq + PartialEq + Clone + Copy + Encode + Decode + Printable; /// Actually dispatch this call and return the result of it. fn dispatch(self, origin: Self::RuntimeOrigin) diff --git a/substrate/primitives/staking/src/offence.rs b/substrate/primitives/staking/src/offence.rs index 64aa4692eb5..30d96d0cbaf 100644 --- a/substrate/primitives/staking/src/offence.rs +++ b/substrate/primitives/staking/src/offence.rs @@ -117,7 +117,7 @@ pub trait Offence { /// Errors that may happen on offence reports. #[derive(PartialEq, sp_runtime::RuntimeDebug)] pub enum OffenceError { - /// The report has already been sumbmitted. + /// The report has already been submitted. DuplicateReport, /// Other error has happened. diff --git a/substrate/primitives/state-machine/src/ext.rs b/substrate/primitives/state-machine/src/ext.rs index 73110f55cba..9aa32bc866c 100644 --- a/substrate/primitives/state-machine/src/ext.rs +++ b/substrate/primitives/state-machine/src/ext.rs @@ -44,7 +44,7 @@ const EXT_NOT_ALLOWED_TO_FAIL: &str = "Externalities not allowed to fail within const BENCHMARKING_FN: &str = "\ This is a special fn only for benchmarking where a database commit happens from the runtime. For that reason client started transactions before calling into runtime are not allowed. - Without client transactions the loop condition garantuees the success of the tx close."; + Without client transactions the loop condition guarantees the success of the tx close."; #[cfg(feature = "std")] fn guard() -> sp_panic_handler::AbortGuard { @@ -722,7 +722,7 @@ impl Encode for EncodeOpaqueValue { } } -/// Auxialiary structure for appending a value to a storage item. +/// Auxiliary structure for appending a value to a storage item. pub(crate) struct StorageAppend<'a>(&'a mut Vec); impl<'a> StorageAppend<'a> { diff --git a/substrate/primitives/state-machine/src/lib.rs b/substrate/primitives/state-machine/src/lib.rs index 200cebe68de..13087431d38 100644 --- a/substrate/primitives/state-machine/src/lib.rs +++ b/substrate/primitives/state-machine/src/lib.rs @@ -1451,7 +1451,7 @@ mod tests { enum Item { InitializationItem, DiscardedItem, - CommitedItem, + CommittedItem, } let key = b"events".to_vec(); @@ -1488,21 +1488,21 @@ mod tests { assert_eq!(ext.storage(key.as_slice()), Some(vec![Item::InitializationItem].encode())); - ext.storage_append(key.clone(), Item::CommitedItem.encode()); + ext.storage_append(key.clone(), Item::CommittedItem.encode()); assert_eq!( ext.storage(key.as_slice()), - Some(vec![Item::InitializationItem, Item::CommitedItem].encode()), + Some(vec![Item::InitializationItem, Item::CommittedItem].encode()), ); } overlay.start_transaction(); - // Then only initlaization item and second (committed) item should persist. + // Then only initialization item and second (committed) item should persist. 
{ let ext = Ext::new(&mut overlay, backend, None); assert_eq!( ext.storage(key.as_slice()), - Some(vec![Item::InitializationItem, Item::CommitedItem].encode()), + Some(vec![Item::InitializationItem, Item::CommittedItem].encode()), ); } } @@ -1866,7 +1866,7 @@ mod tests { // a inner hashable node (&b"k"[..], Some(&long_vec[..])), // need to ensure this is not an inline node - // otherwhise we do not know what is accessed when + // otherwise we do not know what is accessed when // storing proof. (&b"key1"[..], Some(&vec![5u8; 32][..])), (&b"key2"[..], Some(&b"val3"[..])), diff --git a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs index a25a5b81052..601bc2e2919 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -47,7 +47,7 @@ pub struct NoOpenTransaction; #[cfg_attr(test, derive(PartialEq))] pub struct AlreadyInRuntime; -/// Error when calling `exit_runtime` when not being in runtime exection mdde. +/// Error when calling `exit_runtime` when not being in runtime execution mode. #[derive(Debug)] #[cfg_attr(test, derive(PartialEq))] pub struct NotInRuntime; @@ -269,7 +269,7 @@ impl OverlayedMap { /// /// Panics: /// Panics if there are open transactions: `transaction_depth() > 0` - pub fn drain_commited(self) -> impl Iterator { + pub fn drain_committed(self) -> impl Iterator { assert!(self.transaction_depth() == 0, "Drain is not allowed with open transactions."); self.changes.into_iter().map(|(k, mut v)| (k, v.pop_transaction().value)) } @@ -281,7 +281,7 @@ impl OverlayedMap { self.dirty_keys.len() } - /// Call this before transfering control to the runtime. + /// Call this before transferring control to the runtime. /// /// This protects all existing transactions from being removed by the runtime. /// Calling this while already inside the runtime will return an error. @@ -471,7 +471,7 @@ mod test { } fn assert_drained_changes(is: OverlayedChangeSet, expected: Changes) { - let is = is.drain_commited().collect::>(); + let is = is.drain_committed().collect::>(); let expected = expected .iter() .map(|(k, v)| (k.to_vec(), v.0.map(From::from))) @@ -480,7 +480,7 @@ mod test { } fn assert_drained(is: OverlayedChangeSet, expected: Drained) { - let is = is.drain_commited().collect::>(); + let is = is.drain_committed().collect::>(); let expected = expected .iter() .map(|(k, v)| (k.to_vec(), v.map(From::from))) @@ -526,7 +526,7 @@ mod test { changeset.set(b"key0".to_vec(), Some(b"val0-rolled".to_vec()), Some(1000)); changeset.set(b"key5".to_vec(), Some(b"val5-rolled".to_vec()), None); - // changes contain all changes not only the commmited ones. + // changes contain all changes not only the committed ones. 
let all_changes: Changes = vec![ (b"key0", (Some(b"val0-rolled"), vec![1, 10, 1000])), (b"key1", (Some(b"val1"), vec![1])), @@ -807,7 +807,7 @@ mod test { fn drain_with_open_transaction_panics() { let mut changeset = OverlayedChangeSet::default(); changeset.start_transaction(); - let _ = changeset.drain_commited(); + let _ = changeset.drain_committed(); } #[test] diff --git a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs index 039631e4a63..d6fc404e84f 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs @@ -480,7 +480,7 @@ impl OverlayedChanges { Ok(()) } - /// Call this before transfering control to the runtime. + /// Call this before transferring control to the runtime. /// /// This protects all existing transactions from being removed by the runtime. /// Calling this while already inside the runtime will return an error. @@ -575,10 +575,10 @@ impl OverlayedChanges { }; use core::mem::take; - let main_storage_changes = take(&mut self.top).drain_commited(); + let main_storage_changes = take(&mut self.top).drain_committed(); let child_storage_changes = take(&mut self.children) .into_iter() - .map(|(key, (val, info))| (key, (val.drain_commited(), info))); + .map(|(key, (val, info))| (key, (val.drain_committed(), info))); let offchain_storage_changes = self.offchain_drain_committed().collect(); @@ -809,7 +809,7 @@ pub struct OverlayedExtensions<'a> { #[cfg(feature = "std")] impl<'a> OverlayedExtensions<'a> { - /// Create a new instance of overalyed extensions from the given extensions. + /// Create a new instance of overlaid extensions from the given extensions. pub fn new(extensions: &'a mut Extensions) -> Self { Self { extensions: extensions diff --git a/substrate/primitives/state-machine/src/testing.rs b/substrate/primitives/state-machine/src/testing.rs index 0eb7b6d1118..e19ba95755c 100644 --- a/substrate/primitives/state-machine/src/testing.rs +++ b/substrate/primitives/state-machine/src/testing.rs @@ -417,7 +417,7 @@ mod tests { original_ext.backend.clone().into_storage(), ); - // Ensure all have the correct ref counrt + // Ensure all have the correct ref count assert!(original_ext.backend.backend_storage().keys().values().all(|r| *r == 2)); // Drain the raw storage and root. 
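(Reviewer note.) The `drain_commited` → `drain_committed` renames above all touch the transactional overlay used by the state machine, and the contract being renamed is easy to lose in the diff noise: writes made inside a rolled-back transaction vanish, writes made inside a committed transaction survive, and draining is only legal once every transaction is closed. The sketch below, a self-contained toy written for this review (it uses whole-map snapshots instead of the real per-key transaction stacks, and is not code from this patch), illustrates that contract:

```
use std::collections::BTreeMap;

/// Toy stand-in for `OverlayedChangeSet`: each open transaction is modelled
/// as a full snapshot of the map, which is simpler (but less efficient) than
/// the real per-key transaction stacks.
struct ToyOverlay {
    changes: BTreeMap<Vec<u8>, Vec<u8>>,
    snapshots: Vec<BTreeMap<Vec<u8>, Vec<u8>>>,
}

impl ToyOverlay {
    fn new() -> Self {
        Self { changes: BTreeMap::new(), snapshots: Vec::new() }
    }

    fn set(&mut self, key: &[u8], value: &[u8]) {
        self.changes.insert(key.to_vec(), value.to_vec());
    }

    fn start_transaction(&mut self) {
        // Remember the current state so a rollback can restore it.
        self.snapshots.push(self.changes.clone());
    }

    fn commit_transaction(&mut self) {
        // Keep the current state; the saved snapshot is no longer needed.
        self.snapshots.pop().expect("no open transaction");
    }

    fn rollback_transaction(&mut self) {
        // Throw the current state away and restore the snapshot.
        self.changes = self.snapshots.pop().expect("no open transaction");
    }

    /// Mirrors `drain_committed`: only legal once no transaction is open.
    fn drain_committed(self) -> impl Iterator<Item = (Vec<u8>, Vec<u8>)> {
        assert!(self.snapshots.is_empty(), "Drain is not allowed with open transactions.");
        self.changes.into_iter()
    }
}

fn main() {
    let mut overlay = ToyOverlay::new();
    overlay.set(b"init", b"kept");

    overlay.start_transaction();
    overlay.set(b"discarded", b"gone");
    overlay.rollback_transaction();

    overlay.start_transaction();
    overlay.set(b"committed", b"kept");
    overlay.commit_transaction();

    // Only the initial and the committed write survive.
    let drained: Vec<_> = overlay.drain_committed().collect();
    assert_eq!(drained.len(), 2);
    println!("{:?}", drained);
}
```

The real `OverlayedChangeSet` keeps a transaction stack per dirty key rather than snapshotting the whole map, so commits and rollbacks cost time proportional to the keys actually touched inside the transaction.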
diff --git a/substrate/primitives/state-machine/src/trie_backend_essence.rs b/substrate/primitives/state-machine/src/trie_backend_essence.rs index 1824a77c370..a1f00579874 100644 --- a/substrate/primitives/state-machine/src/trie_backend_essence.rs +++ b/substrate/primitives/state-machine/src/trie_backend_essence.rs @@ -34,8 +34,8 @@ use parking_lot::RwLock; use sp_core::storage::{ChildInfo, ChildType, StateVersion}; use sp_trie::{ child_delta_trie_root, delta_trie_root, empty_child_trie_root, - read_child_trie_first_descedant_value, read_child_trie_hash, read_child_trie_value, - read_trie_first_descedant_value, read_trie_value, + read_child_trie_first_descendant_value, read_child_trie_hash, read_child_trie_value, + read_trie_first_descendant_value, read_trie_value, trie_types::{TrieDBBuilder, TrieError}, DBValue, KeySpacedDB, MerkleValue, NodeCodec, PrefixedMemoryDB, Trie, TrieCache, TrieDBRawIterator, TrieRecorder, TrieRecorderProvider, @@ -554,7 +554,7 @@ where let map_e = |e| format!("Trie lookup error: {}", e); self.with_recorder_and_cache(None, |recorder, cache| { - read_trie_first_descedant_value::, _>(self, &self.root, key, recorder, cache) + read_trie_first_descendant_value::, _>(self, &self.root, key, recorder, cache) .map_err(map_e) }) } @@ -570,7 +570,7 @@ where let map_e = |e| format!("Trie lookup error: {}", e); self.with_recorder_and_cache(Some(child_root), |recorder, cache| { - read_child_trie_first_descedant_value::, _>( + read_child_trie_first_descendant_value::, _>( child_info.keyspace(), self, &child_root, diff --git a/substrate/primitives/storage/src/lib.rs b/substrate/primitives/storage/src/lib.rs index b55cc8f2174..197994f5747 100644 --- a/substrate/primitives/storage/src/lib.rs +++ b/substrate/primitives/storage/src/lib.rs @@ -452,7 +452,7 @@ impl TryFrom for StateVersion { impl StateVersion { /// If defined, values in state of size bigger or equal /// to this threshold will use a separate trie node. - /// Otherwhise, value will be inlined in branch or leaf + /// Otherwise, value will be inlined in branch or leaf /// node. pub fn state_value_threshold(&self) -> Option { match self { diff --git a/substrate/primitives/tracing/src/lib.rs b/substrate/primitives/tracing/src/lib.rs index b8b99230db5..34ed088aed0 100644 --- a/substrate/primitives/tracing/src/lib.rs +++ b/substrate/primitives/tracing/src/lib.rs @@ -17,7 +17,7 @@ //! Substrate tracing primitives and macros. //! -//! To trace functions or invidual code in Substrate, this crate provides [`within_span`] +//! To trace functions or individual code in Substrate, this crate provides [`within_span`] //! and [`enter_span`]. See the individual docs for how to use these macros. //! //! Note that to allow traces from wasm execution environment there are @@ -70,7 +70,7 @@ pub use crate::types::{WASM_NAME_KEY, WASM_TARGET_KEY, WASM_TRACE_IDENTIFIER}; /// directly as they yield nothing without the feature present. Instead you should use /// `enter_span!` and `within_span!` – which would strip away even any parameter conversion /// you do within the span-definition (and thus optimise your performance). 
For your -/// convineience you directly specify the `Level` and name of the span or use the full +/// convenience you directly specify the `Level` and name of the span or use the full /// feature set of `span!`/`span_*!` on it: /// /// # Example @@ -98,7 +98,7 @@ pub use crate::types::{WASM_NAME_KEY, WASM_TARGET_KEY, WASM_TRACE_IDENTIFIER}; /// This project only provides the macros and facilities to manage tracing /// it doesn't implement the tracing subscriber or backend directly – that is /// up to the developer integrating it into a specific environment. In native -/// this can and must be done through the regular `tracing`-facitilies, please +/// this can and must be done through the regular `tracing`-facilities, please /// see their documentation for details. /// /// On the wasm-side we've adopted a similar approach of having a global @@ -139,7 +139,7 @@ pub fn init_for_tests() { /// Runs given code within a tracing span, measuring it's execution time. /// /// If tracing is not enabled, the code is still executed. Pass in level and name or -/// use any valid `sp_tracing::Span`followe by `;` and the code to execute, +/// use any valid `sp_tracing::Span`followed by `;` and the code to execute, /// /// # Example /// diff --git a/substrate/primitives/tracing/src/types.rs b/substrate/primitives/tracing/src/types.rs index 3692a81e03c..46f38383d98 100644 --- a/substrate/primitives/tracing/src/types.rs +++ b/substrate/primitives/tracing/src/types.rs @@ -17,7 +17,7 @@ use alloc::{vec, vec::Vec}; use codec::{Decode, Encode}; -/// Types for wasm based tracing. Loosly inspired by `tracing-core` but +/// Types for wasm based tracing. Loosely inspired by `tracing-core` but /// optimised for the specific use case. use core::{fmt::Debug, format_args}; @@ -54,7 +54,7 @@ impl core::default::Default for WasmLevel { } } -/// A paramter value provided to the span/event +/// A parameter value provided to the span/event #[derive(Encode, Decode, Clone)] pub enum WasmValue { U8(u8), @@ -180,9 +180,9 @@ impl From for WasmValue { } } -/// The name of a field provided as the argument name when contstructing an +/// The name of a field provided as the argument name when constructing an /// `event!` or `span!`. -/// Generally generated automaticaly via `stringify` from an `'static &str`. +/// Generally generated automatically via `stringify` from an `'static &str`. /// Likely print-able. #[derive(Encode, Decode, Clone)] pub struct WasmFieldName(Vec); @@ -320,7 +320,7 @@ impl tracing_core::field::Visit for WasmValuesSet { self.0.push((field.name().into(), Some(WasmValue::from(value)))) } } -/// Metadata provides generic information about the specifc location of the +/// Metadata provides generic information about the specific location of the /// `span!` or `event!` call on the wasm-side. #[derive(Encode, Decode, Clone)] pub struct WasmMetadata { diff --git a/substrate/primitives/transaction-storage-proof/src/lib.rs b/substrate/primitives/transaction-storage-proof/src/lib.rs index 352f2aec26e..893b2e33bee 100644 --- a/substrate/primitives/transaction-storage-proof/src/lib.rs +++ b/substrate/primitives/transaction-storage-proof/src/lib.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Storage proof primitives. Constains types and basic code to extract storage +//! Storage proof primitives. Contains types and basic code to extract storage //! proofs for indexed transactions. 
#![cfg_attr(not(feature = "std"), no_std)]
diff --git a/substrate/primitives/trie/src/cache/mod.rs b/substrate/primitives/trie/src/cache/mod.rs
index 01f08a78adc..32078169b50 100644
--- a/substrate/primitives/trie/src/cache/mod.rs
+++ b/substrate/primitives/trie/src/cache/mod.rs
@@ -323,7 +323,7 @@ type ValueAccessSet =
 ///
 /// This cache should be used per state instance created by the backend. One state instance is
 /// referring to the state of one block. It will cache all the accesses that are done to the state
-/// which could not be fullfilled by the [`SharedTrieCache`]. These locally cached items are merged
+/// which could not be fulfilled by the [`SharedTrieCache`]. These locally cached items are merged
 /// back to the shared trie cache when this instance is dropped.
 ///
 /// When using [`Self::as_trie_db_cache`] or [`Self::as_trie_db_mut_cache`], it will lock Mutexes.
diff --git a/substrate/primitives/trie/src/lib.rs b/substrate/primitives/trie/src/lib.rs
index 81b37a0c9a6..0ae448aff6e 100644
--- a/substrate/primitives/trie/src/lib.rs
+++ b/substrate/primitives/trie/src/lib.rs
@@ -326,7 +326,7 @@ pub fn read_trie_value(
+pub fn read_trie_first_descendant_value(
 db: &DB,
 root: &TrieHash,
 key: &[u8],
@@ -447,7 +447,7 @@ where
 /// Read the [`trie_db::MerkleValue`] of the node that is the closest descendant for
 /// the provided child key.
-pub fn read_child_trie_first_descedant_value(
+pub fn read_child_trie_first_descendant_value(
 keyspace: &[u8],
 db: &DB,
 root: &TrieHash,
diff --git a/substrate/primitives/version/proc-macro/src/decl_runtime_version.rs b/substrate/primitives/version/proc-macro/src/decl_runtime_version.rs
index 7ca2d9b71f6..3671d4aff6b 100644
--- a/substrate/primitives/version/proc-macro/src/decl_runtime_version.rs
+++ b/substrate/primitives/version/proc-macro/src/decl_runtime_version.rs
@@ -52,7 +52,7 @@ fn decl_runtime_version_impl_inner(item: ItemConst) -> Result {
 /// enable `std` feature even for `no_std` wasm runtime builds.
 ///
 /// One difference from the original definition is the `apis` field. Since we don't actually parse
-/// `apis` from this macro it will always be emitteed as empty. An empty vector can be encoded as
+/// `apis` from this macro it will always be emitted as empty. An empty vector can be encoded as
 /// a zero-byte, thus `u8` is sufficient here.
 #[derive(Encode)]
 struct RuntimeVersion {
diff --git a/substrate/primitives/version/src/lib.rs b/substrate/primitives/version/src/lib.rs
index 9b14a809ac1..789c507742f 100644
--- a/substrate/primitives/version/src/lib.rs
+++ b/substrate/primitives/version/src/lib.rs
@@ -327,7 +327,7 @@ impl RuntimeVersion {
 ///
 /// For runtime with core api version less than 4,
 /// V0 trie version will be applied to state.
- /// Otherwhise, V1 trie version will be use.
+ /// Otherwise, V1 trie version will be used.
 pub fn state_version(&self) -> StateVersion {
 // If version > than 1, keep using latest version.
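// (Reviewer note, not part of the diff: the expression below relies on the
// `TryFrom<u8>` conversion for `StateVersion` seen earlier in `substrate/primitives/storage`:
// `0` maps to `V0`, `1` to `V1`, and any other stored value fails the conversion, so
// `unwrap_or(StateVersion::V1)` is what the comment above means by "keep using latest version".)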
self.state_version.try_into().unwrap_or(StateVersion::V1)
diff --git a/substrate/utils/fork-tree/src/lib.rs b/substrate/utils/fork-tree/src/lib.rs
index cd175166b9c..ff86467c85d 100644
--- a/substrate/utils/fork-tree/src/lib.rs
+++ b/substrate/utils/fork-tree/src/lib.rs
@@ -683,7 +683,7 @@ where
 node.data
 });
- // Retain only roots that are descendents of the finalized block (this
+ // Retain only roots that are descendants of the finalized block (this
 // happens if the node has been properly finalized) or that are
 // ancestors (or equal) to the finalized block (in this case the node
 // wasn't finalized earlier presumably because the predicate didn't
@@ -1168,7 +1168,7 @@ mod test {
 Ok(Some(false)),
 );
- // finalizing "E" is not allowed since there are not finalized anchestors.
+ // finalizing "E" is not allowed since there are no finalized ancestors.
 assert_eq!(
 tree.finalizes_any_with_descendent_if(&"E", 15, &is_descendent_of, |c| c.effective == 10),
@@ -1309,7 +1309,7 @@ mod test {
 fn map_works() {
 let (mut tree, _) = test_fork_tree();
- // Extend the single root fork-tree to also excercise the roots order during map.
+ // Extend the single root fork-tree to also exercise the roots order during map.
 let is_descendent_of = |_: &&str, _: &&str| -> Result { Ok(false) };
 let is_root = tree.import("A1", 10, 1, &is_descendent_of).unwrap();
 assert!(is_root);
diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs
index c6ba2824016..80a5d27d8c2 100644
--- a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs
+++ b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs
@@ -415,7 +415,7 @@ impl PalletCmd {
 .map_err(|e| {
 format!("Error executing and verifying runtime benchmark: {}", e)
 })?;
- // Dont use these results since verification code will add overhead.
+ // Don't use these results since verification code will add overhead.
let _batch = , String> as Decode>::decode( &mut &result[..], @@ -437,7 +437,7 @@ impl PalletCmd { &pallet.clone(), &extrinsic.clone(), &selected_components.clone(), - false, // dont run verification code for final values + false, // don't run verification code for final values self.repeat, ) .encode(), @@ -469,7 +469,7 @@ impl PalletCmd { &pallet.clone(), &extrinsic.clone(), &selected_components.clone(), - false, // dont run verification code for final values + false, // don't run verification code for final values self.repeat, ) .encode(), diff --git a/substrate/utils/frame/generate-bags/src/lib.rs b/substrate/utils/frame/generate-bags/src/lib.rs index 923017261a4..62485c442d3 100644 --- a/substrate/utils/frame/generate-bags/src/lib.rs +++ b/substrate/utils/frame/generate-bags/src/lib.rs @@ -183,7 +183,7 @@ pub fn generate_thresholds( total_issuance: u128, minimum_balance: u128, ) -> Result<(), std::io::Error> { - // ensure the file is accessable + // ensure the file is accessible if let Some(parent) = output.parent() { if !parent.exists() { std::fs::create_dir_all(parent)?; diff --git a/substrate/utils/frame/remote-externalities/src/lib.rs b/substrate/utils/frame/remote-externalities/src/lib.rs index c7399468da9..254d33deec8 100644 --- a/substrate/utils/frame/remote-externalities/src/lib.rs +++ b/substrate/utils/frame/remote-externalities/src/lib.rs @@ -762,7 +762,7 @@ where let mut sp = Spinner::with_timer(Spinners::Dots, "Inserting keys into DB...".into()); let start = Instant::now(); pending_ext.batch_insert(key_values.clone().into_iter().filter_map(|(k, v)| { - // Don't insert the child keys here, they need to be inserted seperately with all their + // Don't insert the child keys here, they need to be inserted separately with all their // data in the load_child_remote function. match is_default_child_storage_key(&k.0) { true => None, diff --git a/substrate/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs b/substrate/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs index f45258ea593..c0333bb7dac 100644 --- a/substrate/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs +++ b/substrate/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs @@ -132,7 +132,7 @@ pub trait StateMigrationApi { /// Check current migration state. /// /// This call is performed locally without submitting any transactions. Thus executing this - /// won't change any state. Nonetheless it is a VERY costy call that should be + /// won't change any state. Nonetheless it is a VERY costly call that should be /// only exposed to trusted peers. #[method(name = "state_trieMigrationStatus")] fn call(&self, at: Option) -> RpcResult; diff --git a/substrate/utils/substrate-bip39/README.md b/substrate/utils/substrate-bip39/README.md index 368d18f406b..e7a80ca5f2c 100644 --- a/substrate/utils/substrate-bip39/README.md +++ b/substrate/utils/substrate-bip39/README.md @@ -21,12 +21,12 @@ to wallets providing their own dictionaries and checksum mechanism. Issues with to CSPRNG supplied dictionary phrases. 2. Providing own dictionaries felt into the _you ain't gonna need it_ anti-pattern category on day 1. Wallet providers - (be it hardware or software) typically want their products to be compatibile with other wallets so that users can + (be it hardware or software) typically want their products to be compatible with other wallets so that users can migrate to their product without having to migrate all their assets. 
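(Reviewer aside, not part of the patch: the encoding pitfall this README turns to next is easy to demonstrate. The tiny self-contained Rust program below shows the same visible word carrying two different byte sequences, precomposed versus combining-mark forms, which is exactly what pinning phrases to UTF-8 NFKD is meant to neutralize.)

```
fn main() {
    // "mañana" with "ñ" as one precomposed codepoint (the NFC form).
    let composed = "ma\u{00F1}ana";
    // "mañana" with "n" followed by a combining tilde (the NFD/NFKD form).
    let decomposed = "man\u{0303}ana";

    // The two strings render identically but compare unequal byte-for-byte...
    assert_ne!(composed, decomposed);
    assert_ne!(composed.as_bytes(), decomposed.as_bytes());

    // ...so hashing the raw phrase bytes without a canonical normalization
    // would derive different seeds from what a user sees as the same word.
    println!("{composed} = {:?}", composed.as_bytes());
    println!("{decomposed} = {:?}", decomposed.as_bytes());
}
```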
To achieve the above phrases have to be precisely encoded in _The One True Canonical Encoding_, for which UTF-8 NFKD
was chosen. This is largely irrelevant (and even ignored) for English phrases, as they encode to basically just ASCII in
-virtualy every character encoding known to mankind, but immedietly becomes a problem for dictionaries that do use
+virtually every character encoding known to mankind, but immediately becomes a problem for dictionaries that do use
non-ASCII characters. Even if the right encoding is used and implemented correctly, there are still [other caveats
present for some non-english dictionaries](https://github.com/bitcoin/bips/blob/master/bip-0039/bip-0039-wordlists.md),
such as normalizing spaces to a canonical form, or making some latin based characters equivalent to their base in
@@ -34,8 +34,8 @@ dictionary lookups (eg. Spanish `ñ` and `n` are meant to be interchangeable). T
headache, and opens doors for disagreements between buggy implementations, breaking compatibility.

BIP39 does already provide a form of the mnemonic that is free from all of these issues: the entropy byte array. Since
-veryfing the checksum requires that we recover the entropy from which the phrase was generated, no extra work is
-actually needed here. Wallet implementators can encode the dictionaries in whatever encoding they find convenient (as
+verifying the checksum requires that we recover the entropy from which the phrase was generated, no extra work is
+actually needed here. Wallet implementors can encode the dictionaries in whatever encoding they find convenient (as
long as they are the standard BIP39 dictionaries), no harm in using UTF-16 string primitives that Java and JavaScript
provide. Since the dictionary is fixed and known, and the checksum is done on the entropy itself, the exact character
encoding used becomes irrelevant, as are the precise codepoints and amount of whitespace around the words. It is thus
diff --git a/substrate/utils/substrate-bip39/src/lib.rs b/substrate/utils/substrate-bip39/src/lib.rs
index 3673d20faed..5b68bef0c39 100644
--- a/substrate/utils/substrate-bip39/src/lib.rs
+++ b/substrate/utils/substrate-bip39/src/lib.rs
@@ -43,7 +43,7 @@ pub enum Error {
 ///
 /// Any other length will return an error.
 ///
-/// `password` is analog to BIP39 seed generation itself, with an empty string being defalt.
+/// `password` is analog to BIP39 seed generation itself, with an empty string being default.
 pub fn mini_secret_from_entropy(entropy: &[u8], password: &str) -> Result {
 let seed = seed_from_entropy(entropy, password)?;
 Ok(MiniSecretKey::from_bytes(&seed[..32]).expect("Length is always correct; qed"))
 }
diff --git a/substrate/utils/wasm-builder/src/lib.rs b/substrate/utils/wasm-builder/src/lib.rs
index 5cde48c0950..178e499e8f5 100644
--- a/substrate/utils/wasm-builder/src/lib.rs
+++ b/substrate/utils/wasm-builder/src/lib.rs
@@ -207,7 +207,7 @@ fn get_cargo_command(target: RuntimeTarget) -> CargoCommand {
 } else {
 // If no command before provided us with a cargo that supports our Substrate wasm env, we
 // try to search one with rustup. If that fails as well, we return the default cargo and let
- // the prequisities check fail.
+ // the prerequisites check fail.
get_rustup_command(target).unwrap_or(default_cargo)
 }
 }
diff --git a/substrate/utils/wasm-builder/src/wasm_project.rs b/substrate/utils/wasm-builder/src/wasm_project.rs
index 13db9fce431..b58e6bfa36b 100644
--- a/substrate/utils/wasm-builder/src/wasm_project.rs
+++ b/substrate/utils/wasm-builder/src/wasm_project.rs
@@ -695,7 +695,7 @@ impl BuildConfiguration {
 /// "production". It would only contain the builtin profile where the custom profile
 /// inherits from. This is why we inspect the build path to learn which profile is used.
 ///
- /// When not overriden by a env variable we always default to building wasm with the `Release`
+ /// When not overridden by an env variable we always default to building wasm with the `Release`
 /// profile even when the main build uses the debug build. This is because wasm built with the
 /// `Debug` profile is too slow for normal development activities and almost never intended.
 ///
@@ -704,9 +704,9 @@ impl BuildConfiguration {
 ///
 /// # Note
 ///
- /// Can be overriden by setting [`crate::WASM_BUILD_TYPE_ENV`].
+ /// Can be overridden by setting [`crate::WASM_BUILD_TYPE_ENV`].
 fn detect(target: RuntimeTarget, wasm_project: &Path) -> Self {
- let (name, overriden) = if let Ok(name) = env::var(crate::WASM_BUILD_TYPE_ENV) {
+ let (name, overridden) = if let Ok(name) = env::var(crate::WASM_BUILD_TYPE_ENV) {
 (name, true)
 } else {
 // First go backwards to the beginning of the target directory.
@@ -731,14 +731,14 @@ impl BuildConfiguration {
 (name, false)
 };
 let outer_build_profile = Profile::iter().find(|p| p.directory() == name);
- let blob_build_profile = match (outer_build_profile.clone(), overriden) {
- // When not overriden by a env variable we default to using the `Release` profile
+ let blob_build_profile = match (outer_build_profile.clone(), overridden) {
+ // When not overridden by an env variable we default to using the `Release` profile
 // for the wasm build even when the main build uses the debug build. This
 // is because the `Debug` profile is too slow for normal development activities.
 (Some(Profile::Debug), false) => Profile::Release,
- // For any other profile or when overriden we take it at face value.
+ // For any other profile or when overridden we take it at face value.
 (Some(profile), _) => profile,
- // For non overriden unknown profiles we fall back to `Release`.
+ // For non-overridden unknown profiles we fall back to `Release`.
 // This allows us to continue building when a custom profile is used for the
 // main builds cargo. When explicitly passing a profile via env variable we are
 // not doing a fallback.
diff --git a/templates/minimal/node/Cargo.toml b/templates/minimal/node/Cargo.toml
index 65aa1e20752..41d9708ea60 100644
--- a/templates/minimal/node/Cargo.toml
+++ b/templates/minimal/node/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "minimal-template-node"
-description = "A miniaml Substrate-based Substrate node, ready for hacking."
+description = "A minimal Substrate-based Substrate node, ready for hacking."
version = "0.0.0" license = "MIT-0" authors.workspace = true diff --git a/templates/minimal/runtime/Cargo.toml b/templates/minimal/runtime/Cargo.toml index 20ffb706eb4..a99a1e43f85 100644 --- a/templates/minimal/runtime/Cargo.toml +++ b/templates/minimal/runtime/Cargo.toml @@ -29,7 +29,7 @@ pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-featur pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } -# genesis builder that allows us to interacto with runtime genesis config +# genesis builder that allows us to interact with runtime genesis config sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } # local pallet templates -- GitLab From fd79b3b08a9bd8f57cd6183b84fd34705e83a7a0 Mon Sep 17 00:00:00 2001 From: Andrei Eres Date: Tue, 26 Mar 2024 16:51:47 +0100 Subject: [PATCH 032/128] [subsystem-benchmarks] Save results to json (#3829) Here we add the ability to save subsystem benchmark results in JSON format to display them as graphs To draw graphs, CI team will use [github-action-benchmark](https://github.com/benchmark-action/github-action-benchmark). Since we are using custom benchmarks, we need to prepare [a specific data type](https://github.com/benchmark-action/github-action-benchmark?tab=readme-ov-file#examples): ``` [ { "name": "CPU Load", "unit": "Percent", "value": 50 } ] ``` Then we'll get graphs like this: ![example](https://raw.githubusercontent.com/rhysd/ss/master/github-action-benchmark/main.png) [A live page with graphs](https://benchmark-action.github.io/github-action-benchmark/dev/bench/) --------- Co-authored-by: ordian --- Cargo.lock | 1 + ...ilability-distribution-regression-bench.rs | 7 ++ .../availability-recovery-regression-bench.rs | 7 ++ polkadot/node/subsystem-bench/Cargo.toml | 1 + .../subsystem-bench/src/lib/environment.rs | 2 +- polkadot/node/subsystem-bench/src/lib/lib.rs | 1 + .../node/subsystem-bench/src/lib/usage.rs | 28 +++++++ .../node/subsystem-bench/src/lib/utils.rs | 75 +++++-------------- 8 files changed, 66 insertions(+), 56 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 074b657e767..9e52bfcf9a4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13644,6 +13644,7 @@ dependencies = [ "sc-service", "schnorrkel 0.11.4", "serde", + "serde_json", "serde_yaml", "sha1", "sp-application-crypto", diff --git a/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs b/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs index 019eb122208..c33674a8f2f 100644 --- a/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs +++ b/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs @@ -27,6 +27,7 @@ use polkadot_subsystem_bench::{ availability::{benchmark_availability_write, prepare_test, TestState}, configuration::TestConfiguration, usage::BenchmarkUsage, + utils::save_to_file, }; use std::io::Write; @@ -60,7 +61,13 @@ fn main() -> Result<(), String> { }) .collect(); println!("\rDone!{}", " ".repeat(BENCH_COUNT)); + let average_usage = BenchmarkUsage::average(&usages); + save_to_file( + "charts/availability-distribution-regression-bench.json", + average_usage.to_chart_json().map_err(|e| e.to_string())?, + 
) + .map_err(|e| e.to_string())?; println!("{}", average_usage); // We expect no variance for received and sent diff --git a/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs b/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs index 5e8b81be82d..46a38516898 100644 --- a/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs +++ b/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs @@ -28,6 +28,7 @@ use polkadot_subsystem_bench::{ }, configuration::TestConfiguration, usage::BenchmarkUsage, + utils::save_to_file, }; use std::io::Write; @@ -58,7 +59,13 @@ fn main() -> Result<(), String> { }) .collect(); println!("\rDone!{}", " ".repeat(BENCH_COUNT)); + let average_usage = BenchmarkUsage::average(&usages); + save_to_file( + "charts/availability-recovery-regression-bench.json", + average_usage.to_chart_json().map_err(|e| e.to_string())?, + ) + .map_err(|e| e.to_string())?; println!("{}", average_usage); // We expect no variance for received and sent diff --git a/polkadot/node/subsystem-bench/Cargo.toml b/polkadot/node/subsystem-bench/Cargo.toml index 05907e428f9..b494f05180d 100644 --- a/polkadot/node/subsystem-bench/Cargo.toml +++ b/polkadot/node/subsystem-bench/Cargo.toml @@ -71,6 +71,7 @@ prometheus_endpoint = { package = "substrate-prometheus-endpoint", path = "../.. prometheus = { version = "0.13.0", default-features = false } serde = { workspace = true, default-features = true } serde_yaml = { workspace = true } +serde_json = { workspace = true } polkadot-node-core-approval-voting = { path = "../core/approval-voting" } polkadot-approval-distribution = { path = "../network/approval-distribution" } diff --git a/polkadot/node/subsystem-bench/src/lib/environment.rs b/polkadot/node/subsystem-bench/src/lib/environment.rs index 2d80d75a14a..42955d03022 100644 --- a/polkadot/node/subsystem-bench/src/lib/environment.rs +++ b/polkadot/node/subsystem-bench/src/lib/environment.rs @@ -404,7 +404,7 @@ impl TestEnvironment { let total_cpu = test_env_cpu_metrics.sum_by("substrate_tasks_polling_duration_sum"); usage.push(ResourceUsage { - resource_name: "Test environment".to_string(), + resource_name: "test-environment".to_string(), total: total_cpu, per_block: total_cpu / num_blocks, }); diff --git a/polkadot/node/subsystem-bench/src/lib/lib.rs b/polkadot/node/subsystem-bench/src/lib/lib.rs index d06f2822a89..ef2724abc98 100644 --- a/polkadot/node/subsystem-bench/src/lib/lib.rs +++ b/polkadot/node/subsystem-bench/src/lib/lib.rs @@ -26,3 +26,4 @@ pub(crate) mod keyring; pub(crate) mod mock; pub(crate) mod network; pub mod usage; +pub mod utils; diff --git a/polkadot/node/subsystem-bench/src/lib/usage.rs b/polkadot/node/subsystem-bench/src/lib/usage.rs index 7172969a8f9..59296746ec3 100644 --- a/polkadot/node/subsystem-bench/src/lib/usage.rs +++ b/polkadot/node/subsystem-bench/src/lib/usage.rs @@ -82,6 +82,27 @@ impl BenchmarkUsage { _ => None, } } + + // Prepares a json string for a graph representation + // See: https://github.com/benchmark-action/github-action-benchmark?tab=readme-ov-file#examples + pub fn to_chart_json(&self) -> color_eyre::eyre::Result { + let chart = self + .network_usage + .iter() + .map(|v| ChartItem { + name: v.resource_name.clone(), + unit: "KiB".to_string(), + value: v.per_block, + }) + .chain(self.cpu_usage.iter().map(|v| ChartItem { + name: v.resource_name.clone(), + unit: "seconds".to_string(), + value: 
v.per_block,
+ }))
+ .collect::>();
+
+ Ok(serde_json::to_string(&chart)?)
+ }
 }
 fn check_usage(
@@ -151,3 +172,10 @@ impl ResourceUsage {
 }
 type ResourceUsageCheck<'a> = (&'a str, f64, f64);
+
+#[derive(Debug, Serialize)]
+pub struct ChartItem {
+ pub name: String,
+ pub unit: String,
+ pub value: f64,
+}
diff --git a/polkadot/node/subsystem-bench/src/lib/utils.rs b/polkadot/node/subsystem-bench/src/lib/utils.rs
index cd206d8f322..b3cd3a88b6c 100644
--- a/polkadot/node/subsystem-bench/src/lib/utils.rs
+++ b/polkadot/node/subsystem-bench/src/lib/utils.rs
@@ -16,61 +16,26 @@
 //! Test utils
-use crate::usage::BenchmarkUsage;
-use std::io::{stdout, Write};
-
-pub struct WarmUpOptions<'a> {
- /// The maximum number of runs considered for warming up.
- pub warm_up: usize,
- /// The number of runs considered for benchmarking.
- pub bench: usize,
- /// The difference in CPU usage between runs considered as normal
- pub precision: f64,
- /// The subsystems whose CPU usage is checked during warm-up cycles
- pub subsystems: &'a [&'a str],
-}
-
-impl<'a> WarmUpOptions<'a> {
- pub fn new(subsystems: &'a [&'a str]) -> Self {
- Self { warm_up: 100, bench: 3, precision: 0.02, subsystems }
- }
-}
-
-pub fn warm_up_and_benchmark(
- options: WarmUpOptions,
- run: impl Fn() -> BenchmarkUsage,
-) -> Result {
- println!("Warming up...");
- let mut usages = Vec::with_capacity(options.bench);
-
- for n in 1..=options.warm_up {
- let curr = run();
- if let Some(prev) = usages.last() {
- let diffs = options
- .subsystems
- .iter()
- .map(|&v| {
- curr.cpu_usage_diff(prev, v)
- .ok_or(format!("{} not found in benchmark {:?}", v, prev))
- })
- .collect::, String>>()?;
- if !diffs.iter().all(|&v| v < options.precision) {
- usages.clear();
- }
- }
- usages.push(curr);
- print!("\r{}%", n * 100 / options.warm_up);
- if usages.len() == options.bench {
- println!("\rTook {} runs to warm up", n.saturating_sub(options.bench));
- break;
- }
- stdout().flush().unwrap();
- }
-
- if usages.len() != options.bench {
- println!("Didn't warm up after {} runs", options.warm_up);
- return Err("Can't warm up".to_string())
+use std::{fs::File, io::Write};
+
+// Saves a given string to a file
+pub fn save_to_file(path: &str, value: String) -> color_eyre::eyre::Result<()> {
+ let output = std::process::Command::new(env!("CARGO"))
+ .arg("locate-project")
+ .arg("--workspace")
+ .arg("--message-format=plain")
+ .output()
+ .unwrap()
+ .stdout;
+ let workspace_dir = std::path::Path::new(std::str::from_utf8(&output).unwrap().trim())
+ .parent()
+ .unwrap();
+ let path = workspace_dir.join(path);
+ if let Some(dir) = path.parent() {
+ std::fs::create_dir_all(dir)?;
 }
+ let mut file = File::create(path)?;
+ file.write_all(value.as_bytes())?;
- Ok(BenchmarkUsage::average(&usages))
+ Ok(())
 }
-- 
GitLab

From 90234543f3039ce29bf0c9badfc6ec2037308eea Mon Sep 17 00:00:00 2001
From: Tsvetomir Dimitrov
Date: Tue, 26 Mar 2024 17:54:24 +0200
Subject: [PATCH 033/128] Migrate parachain swaps to Coretime (#3714)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This PR notifies the broker pallet of any parachain slot swaps performed on the
relay chain. This is achieved by registering an `OnSwap` for the `coretime`
pallet. The hook sends an XCM message to the broker chain and invokes a new
extrinsic `swap_leases` which updates the `Leases` storage item (which keeps
the legacy parachain leases).

I made two assumptions in this PR:
1.
[`Leases`](https://github.com/paritytech/polkadot-sdk/blob/4987d7982461e2e5ffe219cdf71ec697284cea7c/substrate/frame/broker/src/lib.rs#L120)
in the `broker` pallet and
[`Leases`](https://github.com/paritytech/polkadot-sdk/blob/4987d7982461e2e5ffe219cdf71ec697284cea7c/polkadot/runtime/common/src/slots/mod.rs#L118)
in the `slots` pallet are in sync.
2. The `swap_leases` extrinsic from the `broker` pallet can be triggered only
by root or by an XCM message from the relay chain. If not, the extrinsic will
generate an error and do nothing.

As a side effect of the changes, the `OnSwap` trait is moved from
runtime/common/traits.rs to runtime/parachains. Otherwise it is not accessible
from the `broker` pallet.

Closes https://github.com/paritytech/polkadot-sdk/issues/3552

TODOs:
- [x] Weights
- [x] Tests

---------

Co-authored-by: command-bot <>
Co-authored-by: eskimor
Co-authored-by: Bastian Köcher
---
 .../src/weights/pallet_broker.rs | 158 ++++----
 .../src/weights/pallet_broker.rs | 160 ++++----
 .../runtime/parachains/src/coretime/mod.rs | 20 +
 polkadot/runtime/rococo/src/lib.rs | 12 +-
 polkadot/runtime/westend/src/lib.rs | 12 +-
 prdoc/pr_3714.prdoc | 10 +
 substrate/frame/broker/src/benchmarking.rs | 17 +
 .../frame/broker/src/dispatchable_impls.rs | 18 +
 substrate/frame/broker/src/lib.rs | 8 +
 substrate/frame/broker/src/weights.rs | 368 ++++++++++--------
 10 files changed, 461 insertions(+), 322 deletions(-)
 create mode 100644 prdoc/pr_3714.prdoc

diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs
index 2d30ddc612c..89b1c4c8663 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs
@@ -16,26 +16,24 @@
 //! Autogenerated weights for `pallet_broker`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2024-01-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-03-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-j8vvqcjr-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024

 // Executed Command:
-// ./target/production/polkadot-parachain
+// target/production/polkadot-parachain
 // benchmark
 // pallet
-// --chain=coretime-rococo-dev
-// --wasm-execution=compiled
-// --pallet=pallet_broker
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --extrinsic=*
 // --steps=50
 // --repeat=20
-// --json
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=pallet_broker
+// --chain=coretime-rococo-dev
 // --header=./cumulus/file_header.txt
 // --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/
@@ -56,8 +54,8 @@ impl pallet_broker::WeightInfo for WeightInfo {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 2_462_000 picoseconds.
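// (Reviewer note, not part of the generated file: every weight function below follows
// the same pattern. `Weight::from_parts(ref_time, proof_size)` pairs the measured
// execution time in picoseconds with a PoV proof-size component in bytes, and the
// `saturating_add(T::DbWeight::get().reads(..))` / `.writes(..)` calls charge for the
// storage operations listed in the `Storage:` annotations. This commit only refreshes
// the measured constants and adds an entry for the new `swap_leases` extrinsic.)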
+ Weight::from_parts(2_092_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -67,8 +65,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `10888` // Estimated: `13506` - // Minimum execution time: 25_494_000 picoseconds. - Weight::from_parts(26_063_000, 0) + // Minimum execution time: 21_943_000 picoseconds. + Weight::from_parts(22_570_000, 0) .saturating_add(Weight::from_parts(0, 13506)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -79,8 +77,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `12090` // Estimated: `13506` - // Minimum execution time: 22_299_000 picoseconds. - Weight::from_parts(22_911_000, 0) + // Minimum execution time: 20_923_000 picoseconds. + Weight::from_parts(21_354_000, 0) .saturating_add(Weight::from_parts(0, 13506)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -95,8 +93,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `466` // Estimated: `1951` - // Minimum execution time: 11_590_000 picoseconds. - Weight::from_parts(12_007_000, 0) + // Minimum execution time: 10_687_000 picoseconds. + Weight::from_parts(11_409_000, 0) .saturating_add(Weight::from_parts(0, 1951)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -124,11 +122,11 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `12567` // Estimated: `14052` - // Minimum execution time: 120_928_000 picoseconds. - Weight::from_parts(124_947_252, 0) + // Minimum execution time: 111_288_000 picoseconds. + Weight::from_parts(117_804_282, 0) .saturating_add(Weight::from_parts(0, 14052)) - // Standard Error: 435 - .saturating_add(Weight::from_parts(1_246, 0).saturating_mul(n.into())) + // Standard Error: 391 + .saturating_add(Weight::from_parts(1_243, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(66)) } @@ -144,8 +142,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `316` // Estimated: `3593` - // Minimum execution time: 32_826_000 picoseconds. - Weight::from_parts(33_889_000, 0) + // Minimum execution time: 33_006_000 picoseconds. + Weight::from_parts(34_256_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -166,8 +164,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `434` // Estimated: `4698` - // Minimum execution time: 57_362_000 picoseconds. - Weight::from_parts(58_994_000, 0) + // Minimum execution time: 61_473_000 picoseconds. + Weight::from_parts(66_476_000, 0) .saturating_add(Weight::from_parts(0, 4698)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) @@ -178,8 +176,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `357` // Estimated: `3550` - // Minimum execution time: 13_982_000 picoseconds. - Weight::from_parts(14_447_000, 0) + // Minimum execution time: 13_771_000 picoseconds. 
+ Weight::from_parts(14_374_000, 0) .saturating_add(Weight::from_parts(0, 3550)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -190,8 +188,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `357` // Estimated: `3550` - // Minimum execution time: 15_070_000 picoseconds. - Weight::from_parts(15_735_000, 0) + // Minimum execution time: 15_162_000 picoseconds. + Weight::from_parts(15_742_000, 0) .saturating_add(Weight::from_parts(0, 3550)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -202,8 +200,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `357` // Estimated: `3550` - // Minimum execution time: 16_527_000 picoseconds. - Weight::from_parts(16_894_000, 0) + // Minimum execution time: 16_196_000 picoseconds. + Weight::from_parts(16_796_000, 0) .saturating_add(Weight::from_parts(0, 3550)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(3)) @@ -220,8 +218,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `936` // Estimated: `4681` - // Minimum execution time: 25_493_000 picoseconds. - Weight::from_parts(26_091_000, 0) + // Minimum execution time: 25_653_000 picoseconds. + Weight::from_parts(27_006_000, 0) .saturating_add(Weight::from_parts(0, 4681)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -240,8 +238,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1002` // Estimated: `5996` - // Minimum execution time: 31_498_000 picoseconds. - Weight::from_parts(32_560_000, 0) + // Minimum execution time: 31_114_000 picoseconds. + Weight::from_parts(32_235_000, 0) .saturating_add(Weight::from_parts(0, 5996)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(5)) @@ -257,11 +255,11 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `652` // Estimated: `6196 + m * (2520 ±0)` - // Minimum execution time: 57_183_000 picoseconds. - Weight::from_parts(58_024_898, 0) + // Minimum execution time: 57_280_000 picoseconds. + Weight::from_parts(58_127_480, 0) .saturating_add(Weight::from_parts(0, 6196)) - // Standard Error: 35_831 - .saturating_add(Weight::from_parts(1_384_446, 0).saturating_mul(m.into())) + // Standard Error: 41_670 + .saturating_add(Weight::from_parts(1_203_066, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(T::DbWeight::get().writes(5)) @@ -283,8 +281,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `215` // Estimated: `3680` - // Minimum execution time: 59_762_000 picoseconds. - Weight::from_parts(61_114_000, 0) + // Minimum execution time: 59_968_000 picoseconds. + Weight::from_parts(62_315_000, 0) .saturating_add(Weight::from_parts(0, 3680)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) @@ -297,8 +295,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `465` // Estimated: `3550` - // Minimum execution time: 41_473_000 picoseconds. - Weight::from_parts(44_155_000, 0) + // Minimum execution time: 50_887_000 picoseconds. 
+ Weight::from_parts(57_366_000, 0) .saturating_add(Weight::from_parts(0, 3550)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -313,8 +311,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `463` // Estimated: `3533` - // Minimum execution time: 56_672_000 picoseconds. - Weight::from_parts(58_086_000, 0) + // Minimum execution time: 84_472_000 picoseconds. + Weight::from_parts(96_536_000, 0) .saturating_add(Weight::from_parts(0, 3533)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -331,8 +329,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `857` // Estimated: `3593` - // Minimum execution time: 64_460_000 picoseconds. - Weight::from_parts(65_894_000, 0) + // Minimum execution time: 96_371_000 picoseconds. + Weight::from_parts(104_659_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) @@ -345,8 +343,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `957` // Estimated: `4698` - // Minimum execution time: 37_447_000 picoseconds. - Weight::from_parts(42_318_000, 0) + // Minimum execution time: 51_741_000 picoseconds. + Weight::from_parts(54_461_000, 0) .saturating_add(Weight::from_parts(0, 4698)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -366,8 +364,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 21_219_000 picoseconds. - Weight::from_parts(22_084_648, 0) + // Minimum execution time: 19_901_000 picoseconds. + Weight::from_parts(21_028_116, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -379,11 +377,11 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `266` // Estimated: `1487` - // Minimum execution time: 5_792_000 picoseconds. - Weight::from_parts(6_358_588, 0) + // Minimum execution time: 5_987_000 picoseconds. + Weight::from_parts(6_412_478, 0) .saturating_add(Weight::from_parts(0, 1487)) - // Standard Error: 20 - .saturating_add(Weight::from_parts(26, 0).saturating_mul(n.into())) + // Standard Error: 16 + .saturating_add(Weight::from_parts(47, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -397,8 +395,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `447` // Estimated: `6196` - // Minimum execution time: 38_690_000 picoseconds. - Weight::from_parts(39_706_000, 0) + // Minimum execution time: 38_623_000 picoseconds. + Weight::from_parts(39_773_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) @@ -414,15 +412,13 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Storage: `Broker::Workplan` (r:0 w:60) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. 
- fn rotate_sale(n: u32, ) -> Weight { + fn rotate_sale(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `12514` // Estimated: `13506` - // Minimum execution time: 93_531_000 picoseconds. - Weight::from_parts(95_836_318, 0) + // Minimum execution time: 97_074_000 picoseconds. + Weight::from_parts(101_247_740, 0) .saturating_add(Weight::from_parts(0, 13506)) - // Standard Error: 113 - .saturating_add(Weight::from_parts(329, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(65)) } @@ -434,8 +430,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3493` - // Minimum execution time: 6_506_000 picoseconds. - Weight::from_parts(6_783_000, 0) + // Minimum execution time: 6_317_000 picoseconds. + Weight::from_parts(6_521_000, 0) .saturating_add(Weight::from_parts(0, 3493)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -458,8 +454,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1321` // Estimated: `4786` - // Minimum execution time: 31_927_000 picoseconds. - Weight::from_parts(32_748_000, 0) + // Minimum execution time: 32_575_000 picoseconds. + Weight::from_parts(33_299_000, 0) .saturating_add(Weight::from_parts(0, 4786)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -478,8 +474,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 15_682_000 picoseconds. - Weight::from_parts(16_012_000, 0) + // Minimum execution time: 15_256_000 picoseconds. + Weight::from_parts(15_927_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -490,8 +486,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_147_000 picoseconds. - Weight::from_parts(2_281_000, 0) + // Minimum execution time: 1_783_000 picoseconds. + Weight::from_parts(1_904_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -509,10 +505,22 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `398` // Estimated: `3863` - // Minimum execution time: 12_015_000 picoseconds. - Weight::from_parts(12_619_000, 0) + // Minimum execution time: 12_307_000 picoseconds. + Weight::from_parts(12_967_000, 0) .saturating_add(Weight::from_parts(0, 3863)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `Broker::Leases` (r:1 w:1) + /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(401), added: 896, mode: `MaxEncodedLen`) + fn swap_leases() -> Weight { + // Proof Size summary in bytes: + // Measured: `470` + // Estimated: `1886` + // Minimum execution time: 6_597_000 picoseconds. 
+ Weight::from_parts(6_969_000, 0) + .saturating_add(Weight::from_parts(0, 1886)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs index 8727b9633b1..13d5fcf3898 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs @@ -17,25 +17,23 @@ //! Autogenerated weights for `pallet_broker` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-westend-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=coretime-westend-dev -// --wasm-execution=compiled -// --pallet=pallet_broker -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_broker +// --chain=coretime-westend-dev // --header=./cumulus/file_header.txt // --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/ @@ -56,8 +54,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_944_000 picoseconds. - Weight::from_parts(2_045_000, 0) + // Minimum execution time: 1_897_000 picoseconds. + Weight::from_parts(2_053_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -67,8 +65,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `10888` // Estimated: `13506` - // Minimum execution time: 21_158_000 picoseconds. - Weight::from_parts(21_572_000, 0) + // Minimum execution time: 22_550_000 picoseconds. + Weight::from_parts(22_871_000, 0) .saturating_add(Weight::from_parts(0, 13506)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -79,8 +77,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `12090` // Estimated: `13506` - // Minimum execution time: 20_497_000 picoseconds. - Weight::from_parts(20_995_000, 0) + // Minimum execution time: 21_170_000 picoseconds. + Weight::from_parts(21_645_000, 0) .saturating_add(Weight::from_parts(0, 13506)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -95,8 +93,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `146` // Estimated: `1631` - // Minimum execution time: 10_280_000 picoseconds. - Weight::from_parts(10_686_000, 0) + // Minimum execution time: 10_494_000 picoseconds. 
+ Weight::from_parts(10_942_000, 0) .saturating_add(Weight::from_parts(0, 1631)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -120,15 +118,13 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Storage: `Broker::Workplan` (r:0 w:20) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn start_sales(n: u32, ) -> Weight { + fn start_sales(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `12247` // Estimated: `13732` - // Minimum execution time: 61_020_000 picoseconds. - Weight::from_parts(63_240_622, 0) + // Minimum execution time: 61_014_000 picoseconds. + Weight::from_parts(63_267_651, 0) .saturating_add(Weight::from_parts(0, 13732)) - // Standard Error: 102 - .saturating_add(Weight::from_parts(255, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(26)) } @@ -144,8 +140,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `316` // Estimated: `3593` - // Minimum execution time: 30_627_000 picoseconds. - Weight::from_parts(31_648_000, 0) + // Minimum execution time: 30_931_000 picoseconds. + Weight::from_parts(31_941_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -166,8 +162,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `434` // Estimated: `4698` - // Minimum execution time: 57_701_000 picoseconds. - Weight::from_parts(59_825_000, 0) + // Minimum execution time: 57_466_000 picoseconds. + Weight::from_parts(65_042_000, 0) .saturating_add(Weight::from_parts(0, 4698)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) @@ -178,8 +174,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `357` // Estimated: `3550` - // Minimum execution time: 12_898_000 picoseconds. - Weight::from_parts(13_506_000, 0) + // Minimum execution time: 12_799_000 picoseconds. + Weight::from_parts(13_401_000, 0) .saturating_add(Weight::from_parts(0, 3550)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -190,8 +186,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `357` // Estimated: `3550` - // Minimum execution time: 14_284_000 picoseconds. - Weight::from_parts(14_791_000, 0) + // Minimum execution time: 14_107_000 picoseconds. + Weight::from_parts(14_630_000, 0) .saturating_add(Weight::from_parts(0, 3550)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -202,8 +198,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `357` // Estimated: `3550` - // Minimum execution time: 15_570_000 picoseconds. - Weight::from_parts(16_158_000, 0) + // Minimum execution time: 15_254_000 picoseconds. + Weight::from_parts(16_062_000, 0) .saturating_add(Weight::from_parts(0, 3550)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(3)) @@ -220,8 +216,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `735` // Estimated: `4681` - // Minimum execution time: 23_329_000 picoseconds. 
- Weight::from_parts(24_196_000, 0) + // Minimum execution time: 23_557_000 picoseconds. + Weight::from_parts(24_382_000, 0) .saturating_add(Weight::from_parts(0, 4681)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -240,8 +236,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `801` // Estimated: `5996` - // Minimum execution time: 29_288_000 picoseconds. - Weight::from_parts(30_066_000, 0) + // Minimum execution time: 29_371_000 picoseconds. + Weight::from_parts(30_200_000, 0) .saturating_add(Weight::from_parts(0, 5996)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(5)) @@ -257,11 +253,11 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `652` // Estimated: `6196 + m * (2520 ±0)` - // Minimum execution time: 54_833_000 picoseconds. - Weight::from_parts(55_577_423, 0) + // Minimum execution time: 54_331_000 picoseconds. + Weight::from_parts(55_322_165, 0) .saturating_add(Weight::from_parts(0, 6196)) - // Standard Error: 35_105 - .saturating_add(Weight::from_parts(1_267_911, 0).saturating_mul(m.into())) + // Standard Error: 35_225 + .saturating_add(Weight::from_parts(1_099_614, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(T::DbWeight::get().writes(5)) @@ -283,8 +279,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `215` // Estimated: `3680` - // Minimum execution time: 55_289_000 picoseconds. - Weight::from_parts(56_552_000, 0) + // Minimum execution time: 53_789_000 picoseconds. + Weight::from_parts(55_439_000, 0) .saturating_add(Weight::from_parts(0, 3680)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) @@ -297,8 +293,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `465` // Estimated: `3550` - // Minimum execution time: 39_736_000 picoseconds. - Weight::from_parts(41_346_000, 0) + // Minimum execution time: 43_941_000 picoseconds. + Weight::from_parts(49_776_000, 0) .saturating_add(Weight::from_parts(0, 3550)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -313,8 +309,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `463` // Estimated: `3533` - // Minimum execution time: 57_319_000 picoseconds. - Weight::from_parts(60_204_000, 0) + // Minimum execution time: 64_917_000 picoseconds. + Weight::from_parts(70_403_000, 0) .saturating_add(Weight::from_parts(0, 3533)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -331,8 +327,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `857` // Estimated: `3593` - // Minimum execution time: 85_216_000 picoseconds. - Weight::from_parts(91_144_000, 0) + // Minimum execution time: 72_633_000 picoseconds. + Weight::from_parts(79_305_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) @@ -345,8 +341,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `556` // Estimated: `4698` - // Minimum execution time: 32_331_000 picoseconds. 
- Weight::from_parts(39_877_000, 0) + // Minimum execution time: 36_643_000 picoseconds. + Weight::from_parts(48_218_000, 0) .saturating_add(Weight::from_parts(0, 4698)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -362,28 +358,28 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 1000]`. - fn request_core_count(n: u32, ) -> Weight { + fn request_core_count(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 18_128_000 picoseconds. - Weight::from_parts(19_061_234, 0) + // Minimum execution time: 17_617_000 picoseconds. + Weight::from_parts(18_904_788, 0) .saturating_add(Weight::from_parts(0, 3539)) - // Standard Error: 48 - .saturating_add(Weight::from_parts(141, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Broker::CoreCountInbox` (r:1 w:1) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn process_core_count(_n: u32, ) -> Weight { + fn process_core_count(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `266` // Estimated: `1487` - // Minimum execution time: 5_368_000 picoseconds. - Weight::from_parts(5_837_005, 0) + // Minimum execution time: 5_575_000 picoseconds. + Weight::from_parts(5_887_598, 0) .saturating_add(Weight::from_parts(0, 1487)) + // Standard Error: 16 + .saturating_add(Weight::from_parts(41, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -397,8 +393,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `447` // Estimated: `6196` - // Minimum execution time: 36_047_000 picoseconds. - Weight::from_parts(37_101_000, 0) + // Minimum execution time: 36_415_000 picoseconds. + Weight::from_parts(37_588_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) @@ -414,13 +410,15 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Storage: `Broker::Workplan` (r:0 w:20) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn rotate_sale(_n: u32, ) -> Weight { + fn rotate_sale(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `12194` // Estimated: `13506` - // Minimum execution time: 48_158_000 picoseconds. - Weight::from_parts(49_891_920, 0) + // Minimum execution time: 48_362_000 picoseconds. + Weight::from_parts(49_616_106, 0) .saturating_add(Weight::from_parts(0, 13506)) + // Standard Error: 61 + .saturating_add(Weight::from_parts(59, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(25)) } @@ -432,8 +430,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3493` - // Minimum execution time: 5_911_000 picoseconds. - Weight::from_parts(6_173_000, 0) + // Minimum execution time: 6_148_000 picoseconds. 
+ Weight::from_parts(6_374_000, 0)
 .saturating_add(Weight::from_parts(0, 3493))
 .saturating_add(T::DbWeight::get().reads(1))
 .saturating_add(T::DbWeight::get().writes(1))
 }
@@ -456,8 +454,8 @@ impl pallet_broker::WeightInfo for WeightInfo {
 // Proof Size summary in bytes:
 // Measured: `1321`
 // Estimated: `4786`
- // Minimum execution time: 30_140_000 picoseconds.
- Weight::from_parts(30_912_000, 0)
+ // Minimum execution time: 30_267_000 picoseconds.
+ Weight::from_parts(30_825_000, 0)
 .saturating_add(Weight::from_parts(0, 4786))
 .saturating_add(T::DbWeight::get().reads(7))
 .saturating_add(T::DbWeight::get().writes(4))
@@ -476,8 +474,8 @@ impl pallet_broker::WeightInfo for WeightInfo {
 // Proof Size summary in bytes:
 // Measured: `74`
 // Estimated: `3539`
- // Minimum execution time: 13_684_000 picoseconds.
- Weight::from_parts(14_252_000, 0)
+ // Minimum execution time: 13_491_000 picoseconds.
+ Weight::from_parts(13_949_000, 0)
 .saturating_add(Weight::from_parts(0, 3539))
 .saturating_add(T::DbWeight::get().reads(5))
 .saturating_add(T::DbWeight::get().writes(2))
@@ -488,8 +486,8 @@ impl pallet_broker::WeightInfo for WeightInfo {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 1_718_000 picoseconds.
- Weight::from_parts(1_843_000, 0)
+ // Minimum execution time: 1_711_000 picoseconds.
+ Weight::from_parts(1_913_000, 0)
 .saturating_add(Weight::from_parts(0, 0))
 .saturating_add(T::DbWeight::get().writes(1))
 }
@@ -507,10 +505,22 @@ impl pallet_broker::WeightInfo for WeightInfo {
 // Proof Size summary in bytes:
 // Measured: `398`
 // Estimated: `3863`
- // Minimum execution time: 11_771_000 picoseconds.
- Weight::from_parts(12_120_000, 0)
+ // Minimum execution time: 12_035_000 picoseconds.
+ Weight::from_parts(12_383_000, 0)
 .saturating_add(Weight::from_parts(0, 3863))
 .saturating_add(T::DbWeight::get().reads(5))
 .saturating_add(T::DbWeight::get().writes(2))
 }
+ /// Storage: `Broker::Leases` (r:1 w:1)
+ /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(81), added: 576, mode: `MaxEncodedLen`)
+ fn swap_leases() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `150`
+ // Estimated: `1566`
+ // Minimum execution time: 6_142_000 picoseconds.
+ Weight::from_parts(6_538_000, 0)
+ .saturating_add(Weight::from_parts(0, 1566))
+ .saturating_add(T::DbWeight::get().reads(1))
+ .saturating_add(T::DbWeight::get().writes(1))
+ }
 }
diff --git a/polkadot/runtime/parachains/src/coretime/mod.rs b/polkadot/runtime/parachains/src/coretime/mod.rs
index eb9646d7e86..9095cd90ae0 100644
--- a/polkadot/runtime/parachains/src/coretime/mod.rs
+++ b/polkadot/runtime/parachains/src/coretime/mod.rs
@@ -83,6 +83,8 @@ enum CoretimeCalls {
 SetLease(pallet_broker::TaskId, pallet_broker::Timeslice),
 #[codec(index = 19)]
 NotifyCoreCount(u16),
+ #[codec(index = 99)]
+ SwapLeases(ParaId, ParaId),
 }
 #[frame_support::pallet]
@@ -233,6 +235,24 @@ impl Pallet {
 }
 }
 }
+
+ // Handle legacy swaps in coretime. Notifies the broker parachain via an XCM message that a lease
+ // swap has occurred. This function is meant to be used in an implementation of the `OnSwap` trait.
+ pub fn on_legacy_lease_swap(one: ParaId, other: ParaId) { + let message = Xcm(vec![ + Instruction::UnpaidExecution { + weight_limit: WeightLimit::Unlimited, + check_origin: None, + }, + mk_coretime_call(crate::coretime::CoretimeCalls::SwapLeases(one, other)), + ]); + if let Err(err) = send_xcm::( + Location::new(0, [Junction::Parachain(T::BrokerId::get())]), + message, + ) { + log::error!("Sending `SwapLeases` to coretime chain failed: {:?}", err); + } + } } impl OnNewSession> for Pallet { diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 90824a2f6f0..b26317490cd 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -38,7 +38,7 @@ use runtime_common::{ LocatableAssetConverter, ToAuthor, VersionedLocatableAsset, VersionedLocationConverter, }, paras_registrar, paras_sudo_wrapper, prod_or_fast, slots, - traits::Leaser, + traits::{Leaser, OnSwap}, BlockHashCount, BlockLength, SlowAdjustingFeeUpdate, }; use scale_info::TypeInfo; @@ -1078,7 +1078,7 @@ impl paras_registrar::Config for Runtime { type RuntimeOrigin = RuntimeOrigin; type RuntimeEvent = RuntimeEvent; type Currency = Balances; - type OnSwap = (Crowdloan, Slots); + type OnSwap = (Crowdloan, Slots, SwapLeases); type ParaDeposit = ParaDeposit; type DataDepositPerByte = DataDepositPerByte; type WeightInfo = weights::runtime_common_paras_registrar::WeightInfo; @@ -1306,6 +1306,14 @@ impl pallet_asset_rate::Config for Runtime { type BenchmarkHelper = runtime_common::impls::benchmarks::AssetRateArguments; } +// Notify `coretime` pallet when a lease swap occurs +pub struct SwapLeases; +impl OnSwap for SwapLeases { + fn on_swap(one: ParaId, other: ParaId) { + coretime::Pallet::::on_legacy_lease_swap(one, other); + } +} + construct_runtime! 
{ pub enum Runtime {
diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs
index 664044b713e..12e6174ab6c 100644
--- a/polkadot/runtime/westend/src/lib.rs
+++ b/polkadot/runtime/westend/src/lib.rs
@@ -62,7 +62,7 @@ use runtime_common::{
 LocatableAssetConverter, ToAuthor, VersionedLocatableAsset, VersionedLocationConverter,
 },
 paras_registrar, paras_sudo_wrapper, prod_or_fast, slots,
- traits::Leaser,
+ traits::{Leaser, OnSwap},
 BalanceToU256, BlockHashCount, BlockLength, CurrencyToVote, SlowAdjustingFeeUpdate,
 U256ToBalance,
 };
@@ -1302,7 +1302,7 @@ impl paras_registrar::Config for Runtime {
 type RuntimeOrigin = RuntimeOrigin;
 type RuntimeEvent = RuntimeEvent;
 type Currency = Balances;
- type OnSwap = (Crowdloan, Slots);
+ type OnSwap = (Crowdloan, Slots, SwapLeases);
 type ParaDeposit = ParaDeposit;
 type DataDepositPerByte = RegistrarDataDepositPerByte;
 type WeightInfo = weights::runtime_common_paras_registrar::WeightInfo;
@@ -1414,6 +1414,14 @@ impl pallet_asset_rate::Config for Runtime {
 type BenchmarkHelper = runtime_common::impls::benchmarks::AssetRateArguments;
 }
+// Notify `coretime` pallet when a lease swap occurs
+pub struct SwapLeases;
+impl OnSwap for SwapLeases {
+ fn on_swap(one: ParaId, other: ParaId) {
+ coretime::Pallet::::on_legacy_lease_swap(one, other);
+ }
+}
+
 #[frame_support::runtime(legacy_ordering)]
 mod runtime {
 #[runtime::runtime]
diff --git a/prdoc/pr_3714.prdoc b/prdoc/pr_3714.prdoc
new file mode 100644
index 00000000000..e276d0d2d37
--- /dev/null
+++ b/prdoc/pr_3714.prdoc
@@ -0,0 +1,10 @@
+title: Handle legacy lease swaps on coretime
+
+doc:
+ - audience: Runtime Dev
+ description: |
+ When a `registrar::swap` extrinsic is executed, it swaps two leases on the relay chain, but the
+ broker chain never knows about this swap. This change notifies the broker chain about the swap
+ via an XCM message so that it can update its state.
+crates:
+ - name: pallet-broker
diff --git a/substrate/frame/broker/src/benchmarking.rs b/substrate/frame/broker/src/benchmarking.rs
index 70f488e998c..98ac074ca91 100644
--- a/substrate/frame/broker/src/benchmarking.rs
+++ b/substrate/frame/broker/src/benchmarking.rs
@@ -918,6 +918,23 @@ mod benches {
 Ok(())
 }
+ #[benchmark]
+ fn swap_leases() -> Result<(), BenchmarkError> {
+ let admin_origin =
+ T::AdminOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?;
+
+ // Add two leases in `Leases`
+ let n = (T::MaxLeasedCores::get() / 2) as usize;
+ let mut leases = vec![LeaseRecordItem { task: 1, until: 10u32.into() }; n];
+ leases.extend(vec![LeaseRecordItem { task: 2, until: 20u32.into() }; n]);
+ Leases::::put(BoundedVec::try_from(leases).unwrap());
+
+ #[extrinsic_call]
+ _(admin_origin as T::RuntimeOrigin, 1, 2);
+
+ Ok(())
+ }
+
 // Implements a test for each benchmark. Execute with:
 // `cargo test -p pallet-broker --features runtime-benchmarks`.
impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); diff --git a/substrate/frame/broker/src/dispatchable_impls.rs b/substrate/frame/broker/src/dispatchable_impls.rs index f2451013251..74cda9c4f4c 100644 --- a/substrate/frame/broker/src/dispatchable_impls.rs +++ b/substrate/frame/broker/src/dispatchable_impls.rs @@ -437,4 +437,22 @@ impl Pallet { Self::deposit_event(Event::AllowedRenewalDropped { core, when }); Ok(()) } + + pub(crate) fn do_swap_leases(id: TaskId, other: TaskId) -> DispatchResult { + let mut id_leases_count = 0; + let mut other_leases_count = 0; + Leases::::mutate(|leases| { + leases.iter_mut().for_each(|lease| { + if lease.task == id { + lease.task = other; + id_leases_count += 1; + } else if lease.task == other { + lease.task = id; + other_leases_count += 1; + } + }) + }); + + Ok(()) + } } diff --git a/substrate/frame/broker/src/lib.rs b/substrate/frame/broker/src/lib.rs index f1b49a73a52..b9b5e309ca9 100644 --- a/substrate/frame/broker/src/lib.rs +++ b/substrate/frame/broker/src/lib.rs @@ -786,5 +786,13 @@ pub mod pallet { Self::do_notify_core_count(core_count)?; Ok(()) } + + #[pallet::call_index(99)] + #[pallet::weight(T::WeightInfo::swap_leases())] + pub fn swap_leases(origin: OriginFor, id: TaskId, other: TaskId) -> DispatchResult { + T::AdminOrigin::ensure_origin_or_root(origin)?; + Self::do_swap_leases(id, other)?; + Ok(()) + } } } diff --git a/substrate/frame/broker/src/weights.rs b/substrate/frame/broker/src/weights.rs index a8f50eeee6e..a8b9fb598b8 100644 --- a/substrate/frame/broker/src/weights.rs +++ b/substrate/frame/broker/src/weights.rs @@ -17,10 +17,10 @@ //! Autogenerated weights for `pallet_broker` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-04, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-pzhd7p6z-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -76,6 +76,7 @@ pub trait WeightInfo { fn request_revenue_info_at() -> Weight; fn notify_core_count() -> Weight; fn do_tick_base() -> Weight; + fn swap_leases() -> Weight; } /// Weights for `pallet_broker` using the Substrate node and recommended hardware. @@ -87,8 +88,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_040_000 picoseconds. - Weight::from_parts(3_344_000, 0) + // Minimum execution time: 2_865_000 picoseconds. + Weight::from_parts(3_061_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Reservations` (r:1 w:1) @@ -97,8 +98,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `5016` // Estimated: `7496` - // Minimum execution time: 21_259_000 picoseconds. - Weight::from_parts(22_110_000, 7496) + // Minimum execution time: 18_431_000 picoseconds. 
+ Weight::from_parts(19_558_000, 7496) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -108,8 +109,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6218` // Estimated: `7496` - // Minimum execution time: 20_330_000 picoseconds. - Weight::from_parts(20_826_000, 7496) + // Minimum execution time: 17_724_000 picoseconds. + Weight::from_parts(18_688_000, 7496) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -119,8 +120,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 13_411_000 picoseconds. - Weight::from_parts(13_960_000, 1526) + // Minimum execution time: 10_513_000 picoseconds. + Weight::from_parts(11_138_000, 1526) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -139,14 +140,12 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Broker::Workplan` (r:0 w:10) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn start_sales(n: u32, ) -> Weight { + fn start_sales(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `6330` // Estimated: `8499` - // Minimum execution time: 57_770_000 picoseconds. - Weight::from_parts(61_047_512, 8499) - // Standard Error: 165 - .saturating_add(Weight::from_parts(3, 0).saturating_mul(n.into())) + // Minimum execution time: 50_864_000 picoseconds. + Weight::from_parts(54_000_280, 8499) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(16_u64)) } @@ -162,10 +161,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) fn purchase() -> Weight { // Proof Size summary in bytes: - // Measured: `568` - // Estimated: `2053` - // Minimum execution time: 51_196_000 picoseconds. - Weight::from_parts(52_382_000, 2053) + // Measured: `635` + // Estimated: `2120` + // Minimum execution time: 43_630_000 picoseconds. + Weight::from_parts(44_622_000, 2120) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -185,10 +184,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `686` + // Measured: `753` // Estimated: `4698` - // Minimum execution time: 71_636_000 picoseconds. - Weight::from_parts(73_679_000, 4698) + // Minimum execution time: 62_453_000 picoseconds. + Weight::from_parts(63_882_000, 4698) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -198,8 +197,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 19_182_000 picoseconds. - Weight::from_parts(19_775_000, 3550) + // Minimum execution time: 17_237_000 picoseconds. + Weight::from_parts(17_757_000, 3550) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -209,21 +208,21 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 20_688_000 picoseconds. 
- Weight::from_parts(21_557_000, 3550) + // Minimum execution time: 18_504_000 picoseconds. + Weight::from_parts(19_273_000, 3550) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: `Broker::Regions` (r:1 w:2) + /// Storage: `Broker::Regions` (r:1 w:3) /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) fn interlace() -> Weight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 21_190_000 picoseconds. - Weight::from_parts(22_215_000, 3550) + // Minimum execution time: 20_477_000 picoseconds. + Weight::from_parts(21_328_000, 3550) .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Broker::Configuration` (r:1 w:0) /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) @@ -237,8 +236,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `740` // Estimated: `4681` - // Minimum execution time: 34_591_000 picoseconds. - Weight::from_parts(36_227_000, 4681) + // Minimum execution time: 31_815_000 picoseconds. + Weight::from_parts(32_700_000, 4681) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -256,8 +255,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `775` // Estimated: `5996` - // Minimum execution time: 40_346_000 picoseconds. - Weight::from_parts(41_951_000, 5996) + // Minimum execution time: 38_313_000 picoseconds. + Weight::from_parts(38_985_000, 5996) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -272,10 +271,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `859` // Estimated: `6196 + m * (2520 ±0)` - // Minimum execution time: 75_734_000 picoseconds. - Weight::from_parts(78_168_395, 6196) - // Standard Error: 63_180 - .saturating_add(Weight::from_parts(1_076_259, 0).saturating_mul(m.into())) + // Minimum execution time: 70_170_000 picoseconds. + Weight::from_parts(71_245_388, 6196) + // Standard Error: 54_382 + .saturating_add(Weight::from_parts(1_488_794, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(T::DbWeight::get().writes(5_u64)) @@ -287,8 +286,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3593` - // Minimum execution time: 46_383_000 picoseconds. - Weight::from_parts(47_405_000, 3593) + // Minimum execution time: 43_414_000 picoseconds. + Weight::from_parts(44_475_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -300,8 +299,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `603` // Estimated: `3550` - // Minimum execution time: 30_994_000 picoseconds. - Weight::from_parts(31_979_000, 3550) + // Minimum execution time: 31_327_000 picoseconds. 
+ Weight::from_parts(32_050_000, 3550) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -315,8 +314,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `601` // Estimated: `3533` - // Minimum execution time: 37_584_000 picoseconds. - Weight::from_parts(44_010_000, 3533) + // Minimum execution time: 41_315_000 picoseconds. + Weight::from_parts(42_421_000, 3533) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -330,10 +329,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn drop_history() -> Weight { // Proof Size summary in bytes: - // Measured: `830` + // Measured: `995` // Estimated: `3593` - // Minimum execution time: 45_266_000 picoseconds. - Weight::from_parts(48_000_000, 3593) + // Minimum execution time: 49_707_000 picoseconds. + Weight::from_parts(51_516_000, 3593) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -343,10 +342,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::AllowedRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) fn drop_renewal() -> Weight { // Proof Size summary in bytes: - // Measured: `525` + // Measured: `661` // Estimated: `4698` - // Minimum execution time: 25_365_000 picoseconds. - Weight::from_parts(26_920_000, 4698) + // Minimum execution time: 26_207_000 picoseconds. + Weight::from_parts(27_227_000, 4698) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -355,22 +354,22 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_519_000 picoseconds. - Weight::from_parts(7_098_698, 0) - // Standard Error: 20 - .saturating_add(Weight::from_parts(8, 0).saturating_mul(n.into())) + // Minimum execution time: 4_670_000 picoseconds. + Weight::from_parts(5_170_450, 0) + // Standard Error: 16 + .saturating_add(Weight::from_parts(37, 0).saturating_mul(n.into())) } - /// Storage: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) + /// Storage: `Broker::CoreCountInbox` (r:1 w:1) + /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. fn process_core_count(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `98` - // Estimated: `3563` - // Minimum execution time: 7_608_000 picoseconds. - Weight::from_parts(8_157_815, 3563) - // Standard Error: 26 - .saturating_add(Weight::from_parts(48, 0).saturating_mul(n.into())) + // Measured: `404` + // Estimated: `1487` + // Minimum execution time: 6_916_000 picoseconds. + Weight::from_parts(7_485_053, 1487) + // Standard Error: 23 + .saturating_add(Weight::from_parts(30, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -386,10 +385,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn process_revenue() -> Weight { // Proof Size summary in bytes: - // Measured: `905` - // Estimated: `4370` - // Minimum execution time: 59_993_000 picoseconds. 
- Weight::from_parts(61_752_000, 4370) + // Measured: `972` + // Estimated: `4437` + // Minimum execution time: 50_987_000 picoseconds. + Weight::from_parts(52_303_000, 4437) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -408,10 +407,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6281` // Estimated: `8499` - // Minimum execution time: 41_863_000 picoseconds. - Weight::from_parts(44_033_031, 8499) - // Standard Error: 116 - .saturating_add(Weight::from_parts(764, 0).saturating_mul(n.into())) + // Minimum execution time: 38_334_000 picoseconds. + Weight::from_parts(40_517_609, 8499) + // Standard Error: 90 + .saturating_add(Weight::from_parts(338, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(15_u64)) } @@ -423,8 +422,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3493` - // Minimum execution time: 9_588_000 picoseconds. - Weight::from_parts(9_925_000, 3493) + // Minimum execution time: 7_850_000 picoseconds. + Weight::from_parts(8_157_000, 3493) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -436,8 +435,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1423` // Estimated: `4681` - // Minimum execution time: 19_308_000 picoseconds. - Weight::from_parts(20_482_000, 4681) + // Minimum execution time: 17_313_000 picoseconds. + Weight::from_parts(17_727_000, 4681) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -445,28 +444,46 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 147_000 picoseconds. - Weight::from_parts(184_000, 0) + // Minimum execution time: 171_000 picoseconds. + Weight::from_parts(196_000, 0) } + /// Storage: `Broker::CoreCountInbox` (r:0 w:1) + /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) fn notify_core_count() -> Weight { - T::DbWeight::get().reads_writes(1, 1) + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_413_000 picoseconds. + Weight::from_parts(2_587_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Status` (r:1 w:1) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::Configuration` (r:1 w:0) /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) + /// Storage: `Broker::CoreCountInbox` (r:1 w:0) + /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) fn do_tick_base() -> Weight { // Proof Size summary in bytes: - // Measured: `699` - // Estimated: `4164` - // Minimum execution time: 19_824_000 picoseconds. - Weight::from_parts(20_983_000, 4164) + // Measured: `603` + // Estimated: `4068` + // Minimum execution time: 13_121_000 picoseconds. 
+ Weight::from_parts(13_685_000, 4068) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Broker::Leases` (r:1 w:1) + /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(41), added: 536, mode: `MaxEncodedLen`) + fn swap_leases() -> Weight { + // Proof Size summary in bytes: + // Measured: `239` + // Estimated: `1526` + // Minimum execution time: 6_847_000 picoseconds. + Weight::from_parts(7_185_000, 1526) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) } } @@ -478,8 +495,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_040_000 picoseconds. - Weight::from_parts(3_344_000, 0) + // Minimum execution time: 2_865_000 picoseconds. + Weight::from_parts(3_061_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::Reservations` (r:1 w:1) @@ -488,8 +505,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `5016` // Estimated: `7496` - // Minimum execution time: 21_259_000 picoseconds. - Weight::from_parts(22_110_000, 7496) + // Minimum execution time: 18_431_000 picoseconds. + Weight::from_parts(19_558_000, 7496) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -499,8 +516,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6218` // Estimated: `7496` - // Minimum execution time: 20_330_000 picoseconds. - Weight::from_parts(20_826_000, 7496) + // Minimum execution time: 17_724_000 picoseconds. + Weight::from_parts(18_688_000, 7496) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -510,8 +527,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 13_411_000 picoseconds. - Weight::from_parts(13_960_000, 1526) + // Minimum execution time: 10_513_000 picoseconds. + Weight::from_parts(11_138_000, 1526) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -530,14 +547,12 @@ impl WeightInfo for () { /// Storage: `Broker::Workplan` (r:0 w:10) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn start_sales(n: u32, ) -> Weight { + fn start_sales(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `6330` // Estimated: `8499` - // Minimum execution time: 57_770_000 picoseconds. - Weight::from_parts(61_047_512, 8499) - // Standard Error: 165 - .saturating_add(Weight::from_parts(3, 0).saturating_mul(n.into())) + // Minimum execution time: 50_864_000 picoseconds. + Weight::from_parts(54_000_280, 8499) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(16_u64)) } @@ -553,10 +568,10 @@ impl WeightInfo for () { /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) fn purchase() -> Weight { // Proof Size summary in bytes: - // Measured: `568` - // Estimated: `2053` - // Minimum execution time: 51_196_000 picoseconds. - Weight::from_parts(52_382_000, 2053) + // Measured: `635` + // Estimated: `2120` + // Minimum execution time: 43_630_000 picoseconds. 
+ Weight::from_parts(44_622_000, 2120) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -576,10 +591,10 @@ impl WeightInfo for () { /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `686` + // Measured: `753` // Estimated: `4698` - // Minimum execution time: 71_636_000 picoseconds. - Weight::from_parts(73_679_000, 4698) + // Minimum execution time: 62_453_000 picoseconds. + Weight::from_parts(63_882_000, 4698) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -589,8 +604,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 19_182_000 picoseconds. - Weight::from_parts(19_775_000, 3550) + // Minimum execution time: 17_237_000 picoseconds. + Weight::from_parts(17_757_000, 3550) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -600,21 +615,21 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 20_688_000 picoseconds. - Weight::from_parts(21_557_000, 3550) + // Minimum execution time: 18_504_000 picoseconds. + Weight::from_parts(19_273_000, 3550) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: `Broker::Regions` (r:1 w:2) + /// Storage: `Broker::Regions` (r:1 w:3) /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) fn interlace() -> Weight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 21_190_000 picoseconds. - Weight::from_parts(22_215_000, 3550) + // Minimum execution time: 20_477_000 picoseconds. + Weight::from_parts(21_328_000, 3550) .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Broker::Configuration` (r:1 w:0) /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) @@ -628,8 +643,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `740` // Estimated: `4681` - // Minimum execution time: 34_591_000 picoseconds. - Weight::from_parts(36_227_000, 4681) + // Minimum execution time: 31_815_000 picoseconds. + Weight::from_parts(32_700_000, 4681) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -647,8 +662,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `775` // Estimated: `5996` - // Minimum execution time: 40_346_000 picoseconds. - Weight::from_parts(41_951_000, 5996) + // Minimum execution time: 38_313_000 picoseconds. + Weight::from_parts(38_985_000, 5996) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -663,10 +678,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `859` // Estimated: `6196 + m * (2520 ±0)` - // Minimum execution time: 75_734_000 picoseconds. - Weight::from_parts(78_168_395, 6196) - // Standard Error: 63_180 - .saturating_add(Weight::from_parts(1_076_259, 0).saturating_mul(m.into())) + // Minimum execution time: 70_170_000 picoseconds. 
+ Weight::from_parts(71_245_388, 6196) + // Standard Error: 54_382 + .saturating_add(Weight::from_parts(1_488_794, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(RocksDbWeight::get().writes(5_u64)) @@ -678,8 +693,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3593` - // Minimum execution time: 46_383_000 picoseconds. - Weight::from_parts(47_405_000, 3593) + // Minimum execution time: 43_414_000 picoseconds. + Weight::from_parts(44_475_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -691,8 +706,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `603` // Estimated: `3550` - // Minimum execution time: 30_994_000 picoseconds. - Weight::from_parts(31_979_000, 3550) + // Minimum execution time: 31_327_000 picoseconds. + Weight::from_parts(32_050_000, 3550) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -706,8 +721,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `601` // Estimated: `3533` - // Minimum execution time: 37_584_000 picoseconds. - Weight::from_parts(44_010_000, 3533) + // Minimum execution time: 41_315_000 picoseconds. + Weight::from_parts(42_421_000, 3533) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -721,10 +736,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn drop_history() -> Weight { // Proof Size summary in bytes: - // Measured: `830` + // Measured: `995` // Estimated: `3593` - // Minimum execution time: 45_266_000 picoseconds. - Weight::from_parts(48_000_000, 3593) + // Minimum execution time: 49_707_000 picoseconds. + Weight::from_parts(51_516_000, 3593) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -734,10 +749,10 @@ impl WeightInfo for () { /// Proof: `Broker::AllowedRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) fn drop_renewal() -> Weight { // Proof Size summary in bytes: - // Measured: `525` + // Measured: `661` // Estimated: `4698` - // Minimum execution time: 25_365_000 picoseconds. - Weight::from_parts(26_920_000, 4698) + // Minimum execution time: 26_207_000 picoseconds. + Weight::from_parts(27_227_000, 4698) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -746,22 +761,22 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_519_000 picoseconds. - Weight::from_parts(7_098_698, 0) - // Standard Error: 20 - .saturating_add(Weight::from_parts(8, 0).saturating_mul(n.into())) + // Minimum execution time: 4_670_000 picoseconds. + Weight::from_parts(5_170_450, 0) + // Standard Error: 16 + .saturating_add(Weight::from_parts(37, 0).saturating_mul(n.into())) } - /// Storage: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) + /// Storage: `Broker::CoreCountInbox` (r:1 w:1) + /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. 
fn process_core_count(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `98` - // Estimated: `3563` - // Minimum execution time: 7_608_000 picoseconds. - Weight::from_parts(8_157_815, 3563) - // Standard Error: 26 - .saturating_add(Weight::from_parts(48, 0).saturating_mul(n.into())) + // Measured: `404` + // Estimated: `1487` + // Minimum execution time: 6_916_000 picoseconds. + Weight::from_parts(7_485_053, 1487) + // Standard Error: 23 + .saturating_add(Weight::from_parts(30, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -777,10 +792,10 @@ impl WeightInfo for () { /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn process_revenue() -> Weight { // Proof Size summary in bytes: - // Measured: `905` - // Estimated: `4370` - // Minimum execution time: 59_993_000 picoseconds. - Weight::from_parts(61_752_000, 4370) + // Measured: `972` + // Estimated: `4437` + // Minimum execution time: 50_987_000 picoseconds. + Weight::from_parts(52_303_000, 4437) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -799,10 +814,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6281` // Estimated: `8499` - // Minimum execution time: 41_863_000 picoseconds. - Weight::from_parts(44_033_031, 8499) - // Standard Error: 116 - .saturating_add(Weight::from_parts(764, 0).saturating_mul(n.into())) + // Minimum execution time: 38_334_000 picoseconds. + Weight::from_parts(40_517_609, 8499) + // Standard Error: 90 + .saturating_add(Weight::from_parts(338, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(15_u64)) } @@ -814,8 +829,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3493` - // Minimum execution time: 9_588_000 picoseconds. - Weight::from_parts(9_925_000, 3493) + // Minimum execution time: 7_850_000 picoseconds. + Weight::from_parts(8_157_000, 3493) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -827,8 +842,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1423` // Estimated: `4681` - // Minimum execution time: 19_308_000 picoseconds. - Weight::from_parts(20_482_000, 4681) + // Minimum execution time: 17_313_000 picoseconds. + Weight::from_parts(17_727_000, 4681) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -836,28 +851,45 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 147_000 picoseconds. - Weight::from_parts(184_000, 0) + // Minimum execution time: 171_000 picoseconds. + Weight::from_parts(196_000, 0) } + /// Storage: `Broker::CoreCountInbox` (r:0 w:1) + /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) fn notify_core_count() -> Weight { - RocksDbWeight::get().reads(1) - .saturating_add(RocksDbWeight::get().writes(1)) + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_413_000 picoseconds. 
+ Weight::from_parts(2_587_000, 0)
 .saturating_add(RocksDbWeight::get().writes(1_u64))
 }
 /// Storage: `Broker::Status` (r:1 w:1)
 /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`)
 /// Storage: `Broker::Configuration` (r:1 w:0)
 /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`)
- /// Storage: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1)
- /// Proof: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1)
+ /// Storage: `Broker::CoreCountInbox` (r:1 w:0)
+ /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`)
 /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1)
 /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1)
 fn do_tick_base() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `699`
- // Estimated: `4164`
- // Minimum execution time: 19_824_000 picoseconds.
- Weight::from_parts(20_983_000, 4164)
+ // Measured: `603`
+ // Estimated: `4068`
+ // Minimum execution time: 13_121_000 picoseconds.
+ Weight::from_parts(13_685_000, 4068)
 .saturating_add(RocksDbWeight::get().reads(4_u64))
- .saturating_add(RocksDbWeight::get().writes(3_u64))
+ .saturating_add(RocksDbWeight::get().writes(2_u64))
+ }
+ /// Storage: `Broker::Leases` (r:1 w:1)
+ /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(41), added: 536, mode: `MaxEncodedLen`)
+ fn swap_leases() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `239`
+ // Estimated: `1526`
+ // Minimum execution time: 6_847_000 picoseconds.
+ Weight::from_parts(7_185_000, 1526)
+ .saturating_add(RocksDbWeight::get().reads(1_u64))
+ .saturating_add(RocksDbWeight::get().writes(1_u64))
 }
 }
--
GitLab


From 0c15d887519d58c50880ccd149b6ee9a7635b5bd Mon Sep 17 00:00:00 2001
From: Bastian Köcher
Date: Tue, 26 Mar 2024 18:00:39 +0000
Subject: [PATCH 034/128] westend: `SignedPhase` is a constant (#3646)

In preparation for the merkleized metadata, we need to ensure that
constants are actually constant. If we want to test the unsigned phase,
we could, for example, just disable the signed voters. Or we could add
some extra mechanism to the pallet to disable the signed phase from
time to time.

---------

Co-authored-by: Ankan <10196091+Ank4n@users.noreply.github.com>
Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com>
---
 substrate/frame/election-provider-multi-phase/src/lib.rs | 2 --
 1 file changed, 2 deletions(-)

diff --git a/substrate/frame/election-provider-multi-phase/src/lib.rs b/substrate/frame/election-provider-multi-phase/src/lib.rs
index 31a79577d1f..11577cd3526 100644
--- a/substrate/frame/election-provider-multi-phase/src/lib.rs
+++ b/substrate/frame/election-provider-multi-phase/src/lib.rs
@@ -586,10 +586,8 @@ pub mod pallet {
 type EstimateCallFee: EstimateCallFee, BalanceOf>;

 /// Duration of the unsigned phase.
- #[pallet::constant]
 type UnsignedPhase: Get>;

 /// Duration of the signed phase.
- #[pallet::constant]
 type SignedPhase: Get>;

 /// The minimum amount of improvement to the solution score that defines a solution as
--
GitLab


From 3c972fc19e14b55e8a077145cf2620bccb0a6f8d Mon Sep 17 00:00:00 2001
From: Pavel Orlov <45266194+PraetorP@users.noreply.github.com>
Date: Wed, 27 Mar 2024 01:34:28 +0700
Subject: [PATCH 035/128] XCM Fee Payment Runtime API (#3607)

The PR provides an API for obtaining:
- the weight required to execute an XCM message,
- a list of acceptable `AssetId`s for message execution payment,
- the cost of the weight in the specified acceptable `AssetId`.

It is meant to address an issue where one has to guess how much fee to
pay for execution. Also, at the moment, a client has to guess which
assets are acceptable for execution fee payment. See the related issue
https://github.com/paritytech/polkadot-sdk/issues/690. With this API, a
client is supposed to query the list of the supported asset IDs (in the
XCM version format the client understands), weigh the XCM program the
client wants to execute, and convert the weight into one of the
acceptable assets. Note that the client is supposed to know what program
will be executed on what chains. However, having a small companion JS
library for the pallet-xcm and xtokens should be enough to determine
what XCM programs will be executed and where (since these pallets
compose a known small set of programs).

```Rust
pub trait XcmPaymentApi<Call>
where
	Call: Codec,
{
	/// Returns a list of acceptable payment assets.
	///
	/// # Arguments
	///
	/// * `xcm_version`: Version.
	fn query_acceptable_payment_assets(xcm_version: Version) -> Result<Vec<VersionedAssetId>, Error>;
	/// Returns a weight needed to execute an XCM.
	///
	/// # Arguments
	///
	/// * `message`: `VersionedXcm`.
	fn query_xcm_weight(message: VersionedXcm<Call>) -> Result<Weight, Error>;
	/// Converts a weight into a fee for the specified `AssetId`.
	///
	/// # Arguments
	///
	/// * `weight`: convertible `Weight`.
	/// * `asset`: `VersionedAssetId`.
	fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result<u128, Error>;
	/// Get delivery fees for sending a specific `message` to a `destination`.
	/// These always come in a specific asset, defined by the chain.
	///
	/// # Arguments
	/// * `message`: The message that'll be sent, necessary because most delivery fees are based on the
	///   size of the message.
	/// * `destination`: The destination to send the message to. Different destinations may use
	///   different senders that charge different fees.
	fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result<VersionedAssets, Error>;
}
```

An [example](https://gist.github.com/PraetorP/4bc323ff85401abe253897ba990ec29d) of client-side code.
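For orientation, here is a minimal, hypothetical client-side sketch (not part of this PR) of how the weight and fee queries are meant to be chained. The two `query_*` closures are assumed stand-ins for whatever transport actually executes the runtime API calls (for example, wrappers around the `state_call` RPC):

```Rust
use sp_weights::Weight;
use xcm::{VersionedAssetId, VersionedXcm};

/// Estimate the execution fee for `message`, denominated in `asset`, by
/// chaining the two runtime-API queries: first weigh the program, then
/// convert that weight into the chosen asset. The closures are assumed
/// transport wrappers, not part of the API itself.
fn estimate_execution_fee<E>(
	message: VersionedXcm<()>,
	asset: VersionedAssetId,
	query_xcm_weight: impl Fn(VersionedXcm<()>) -> Result<Weight, E>,
	query_weight_to_asset_fee: impl Fn(Weight, VersionedAssetId) -> Result<u128, E>,
) -> Result<u128, E> {
	// Weigh the XCM program, then price that weight in the chosen asset.
	let weight = query_xcm_weight(message)?;
	query_weight_to_asset_fee(weight, asset)
}
```

In practice a client would first call `query_acceptable_payment_assets` and pick `asset` from the returned list before running the conversion above.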
--------- Co-authored-by: Francisco Aguirre Co-authored-by: Adrian Catangiu Co-authored-by: Daniel Shiposha --- Cargo.lock | 19 ++++ Cargo.toml | 1 + polkadot/node/service/Cargo.toml | 3 + polkadot/node/service/src/fake_runtime_api.rs | 21 +++- polkadot/runtime/rococo/Cargo.toml | 2 + polkadot/runtime/rococo/src/lib.rs | 53 ++++++++-- polkadot/runtime/westend/Cargo.toml | 2 + polkadot/runtime/westend/src/lib.rs | 51 ++++++++-- polkadot/runtime/westend/src/tests.rs | 1 - polkadot/xcm/pallet-xcm/Cargo.toml | 2 + polkadot/xcm/pallet-xcm/src/lib.rs | 32 ++++++ polkadot/xcm/src/lib.rs | 10 ++ .../xcm-fee-payment-runtime-api/Cargo.toml | 40 ++++++++ .../xcm-fee-payment-runtime-api/src/lib.rs | 99 +++++++++++++++++++ prdoc/pr_3607.prdoc | 26 +++++ 15 files changed, 342 insertions(+), 20 deletions(-) create mode 100644 polkadot/xcm/xcm-fee-payment-runtime-api/Cargo.toml create mode 100644 polkadot/xcm/xcm-fee-payment-runtime-api/src/lib.rs create mode 100644 prdoc/pr_3607.prdoc diff --git a/Cargo.lock b/Cargo.lock index 9e52bfcf9a4..f246a978fd2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11188,6 +11188,7 @@ dependencies = [ "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", + "xcm-fee-payment-runtime-api", ] [[package]] @@ -13540,12 +13541,14 @@ dependencies = [ "sp-transaction-pool", "sp-version", "sp-weights", + "staging-xcm", "substrate-prometheus-endpoint", "tempfile", "thiserror", "tracing-gum", "westend-runtime", "westend-runtime-constants", + "xcm-fee-payment-runtime-api", ] [[package]] @@ -15116,6 +15119,7 @@ dependencies = [ "substrate-wasm-builder", "tiny-keccak", "tokio", + "xcm-fee-payment-runtime-api", ] [[package]] @@ -21970,6 +21974,7 @@ dependencies = [ "tiny-keccak", "tokio", "westend-runtime-constants", + "xcm-fee-payment-runtime-api", ] [[package]] @@ -22443,6 +22448,20 @@ dependencies = [ "staging-xcm-executor", ] +[[package]] +name = "xcm-fee-payment-runtime-api" +version = "0.1.0" +dependencies = [ + "frame-support", + "parity-scale-codec", + "scale-info", + "sp-api", + "sp-runtime", + "sp-std 14.0.0", + "sp-weights", + "staging-xcm", +] + [[package]] name = "xcm-procedural" version = "7.0.0" diff --git a/Cargo.toml b/Cargo.toml index 5eeac597827..e6162830375 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -214,6 +214,7 @@ members = [ "polkadot/xcm/xcm-builder", "polkadot/xcm/xcm-executor", "polkadot/xcm/xcm-executor/integration-tests", + "polkadot/xcm/xcm-fee-payment-runtime-api", "polkadot/xcm/xcm-simulator", "polkadot/xcm/xcm-simulator/example", "polkadot/xcm/xcm-simulator/fuzzer", diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index e2bccfa5510..5a42443c84c 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -142,6 +142,9 @@ polkadot-node-core-pvf-checker = { path = "../core/pvf-checker", optional = true polkadot-node-core-runtime-api = { path = "../core/runtime-api", optional = true } polkadot-statement-distribution = { path = "../network/statement-distribution", optional = true } +xcm = { package = "staging-xcm", path = "../../xcm" } +xcm-fee-payment-runtime-api = { path = "../../xcm/xcm-fee-payment-runtime-api" } + [dev-dependencies] polkadot-test-client = { path = "../test/client" } polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } diff --git a/polkadot/node/service/src/fake_runtime_api.rs b/polkadot/node/service/src/fake_runtime_api.rs index 085ea93fdc7..c6cfb7a27d0 100644 --- a/polkadot/node/service/src/fake_runtime_api.rs +++ 
b/polkadot/node/service/src/fake_runtime_api.rs @@ -30,6 +30,7 @@ use polkadot_primitives::{ ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; + use sp_core::OpaqueMetadata; use sp_runtime::{ traits::Block as BlockT, @@ -39,7 +40,7 @@ use sp_runtime::{ use sp_version::RuntimeVersion; use sp_weights::Weight; use std::collections::BTreeMap; - +use xcm::{VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm}; sp_api::decl_runtime_apis! { /// This runtime API is only implemented for the test runtime! pub trait GetLastTimestamp { @@ -396,4 +397,22 @@ sp_api::impl_runtime_apis! { unimplemented!() } } + + impl xcm_fee_payment_runtime_api::XcmPaymentApi for Runtime { + fn query_acceptable_payment_assets(_: xcm::Version) -> Result, xcm_fee_payment_runtime_api::Error> { + unimplemented!() + } + + fn query_weight_to_asset_fee(_: Weight, _: VersionedAssetId) -> Result { + unimplemented!() + } + + fn query_xcm_weight(_: VersionedXcm<()>) -> Result { + unimplemented!() + } + + fn query_delivery_fees(_: VersionedLocation, _: VersionedXcm<()>) -> Result { + unimplemented!() + } + } } diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml index 3dc59cc1728..6f63a93cebe 100644 --- a/polkadot/runtime/rococo/Cargo.toml +++ b/polkadot/runtime/rococo/Cargo.toml @@ -104,6 +104,7 @@ polkadot-parachain-primitives = { path = "../../parachain", default-features = f xcm = { package = "staging-xcm", path = "../../xcm", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../../xcm/xcm-executor", default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../../xcm/xcm-builder", default-features = false } +xcm-fee-payment-runtime-api = { path = "../../xcm/xcm-fee-payment-runtime-api", default-features = false } [dev-dependencies] tiny-keccak = { version = "2.0.2", features = ["keccak"] } @@ -208,6 +209,7 @@ std = [ "tx-pool-api/std", "xcm-builder/std", "xcm-executor/std", + "xcm-fee-payment-runtime-api/std", "xcm/std", ] runtime-benchmarks = [ diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index b26317490cd..8c8abe97ede 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -75,7 +75,7 @@ use frame_support::{ InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, PrivilegeCmp, ProcessMessage, ProcessMessageError, StorageMapShim, WithdrawReasons, }, - weights::{ConstantMultiplier, WeightMeter}, + weights::{ConstantMultiplier, WeightMeter, WeightToFee as _}, PalletId, }; use frame_system::{EnsureRoot, EnsureSigned}; @@ -98,7 +98,10 @@ use sp_staking::SessionIndex; #[cfg(any(feature = "std", test))] use sp_version::NativeVersion; use sp_version::RuntimeVersion; -use xcm::{latest::prelude::*, VersionedLocation}; +use xcm::{ + latest::prelude::*, IntoVersion, VersionedAssetId, VersionedAssets, VersionedLocation, + VersionedXcm, +}; use xcm_builder::PayOverXcm; pub use frame_system::Call as SystemCall; @@ -123,6 +126,7 @@ use governance::{ pallet_custom_origins, AuctionAdmin, Fellows, GeneralAdmin, LeaseAdmin, Treasurer, TreasurySpender, }; +use xcm_fee_payment_runtime_api::Error as XcmPaymentApiError; #[cfg(test)] mod tests; @@ -216,7 +220,7 @@ pub struct OriginPrivilegeCmp; impl PrivilegeCmp for OriginPrivilegeCmp { fn cmp_privilege(left: &OriginCaller, right: &OriginCaller) -> Option { if left == right { - return Some(Ordering::Equal) + return 
Some(Ordering::Equal); } match (left, right) { @@ -1493,11 +1497,11 @@ pub mod migrations { let now = frame_system::Pallet::::block_number(); let lease = slots::Pallet::::lease(para); if lease.is_empty() { - return None + return None; } // Lease not yet started, ignore: if lease.iter().any(Option::is_none) { - return None + return None; } let (index, _) = as Leaser>::lease_period_index(now)?; @@ -1559,7 +1563,7 @@ pub mod migrations { fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { log::warn!(target: "runtime::session_keys", "Skipping session keys migration pre-upgrade check due to spec version (already applied?)"); - return Ok(Vec::new()) + return Ok(Vec::new()); } log::info!(target: "runtime::session_keys", "Collecting pre-upgrade session keys state"); @@ -1588,7 +1592,7 @@ pub mod migrations { fn on_runtime_upgrade() -> Weight { if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { log::info!("Skipping session keys upgrade: already applied"); - return ::DbWeight::get().reads(1) + return ::DbWeight::get().reads(1); } log::trace!("Upgrading session keys"); Session::upgrade_keys::(transform_session_keys); @@ -1601,7 +1605,7 @@ pub mod migrations { ) -> Result<(), sp_runtime::TryRuntimeError> { if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { log::warn!(target: "runtime::session_keys", "Skipping session keys migration post-upgrade check due to spec version (already applied?)"); - return Ok(()) + return Ok(()); } let key_ids = SessionKeys::key_ids(); @@ -1785,6 +1789,37 @@ sp_api::impl_runtime_apis! { } } + impl xcm_fee_payment_runtime_api::XcmPaymentApi for Runtime { + fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { + if !matches!(xcm_version, 3 | 4) { + return Err(XcmPaymentApiError::UnhandledXcmVersion); + } + Ok([VersionedAssetId::V4(xcm_config::TokenLocation::get().into())] + .into_iter() + .filter_map(|asset| asset.into_version(xcm_version).ok()) + .collect()) + } + + fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { + let local_asset = VersionedAssetId::V4(xcm_config::TokenLocation::get().into()); + let asset = asset + .into_version(4) + .map_err(|_| XcmPaymentApiError::VersionedConversionFailed)?; + + if asset != local_asset { return Err(XcmPaymentApiError::AssetNotFound); } + + Ok(WeightToFee::weight_to_fee(&weight)) + } + + fn query_xcm_weight(message: VersionedXcm<()>) -> Result { + XcmPallet::query_xcm_weight(message) + } + + fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result { + XcmPallet::query_delivery_fees(destination, message) + } + } + impl sp_api::Metadata for Runtime { fn metadata() -> OpaqueMetadata { OpaqueMetadata::new(Runtime::metadata().into()) @@ -2493,7 +2528,7 @@ mod remote_tests { #[tokio::test] async fn run_migrations() { if var("RUN_MIGRATION_TESTS").is_err() { - return + return; } sp_tracing::try_init_simple(); diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml index fcead1dd0b5..587a6c9a590 100644 --- a/polkadot/runtime/westend/Cargo.toml +++ b/polkadot/runtime/westend/Cargo.toml @@ -114,6 +114,7 @@ runtime-parachains = { package = "polkadot-runtime-parachains", path = "../parac xcm = { package = "staging-xcm", path = "../../xcm", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../../xcm/xcm-executor", 
default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../../xcm/xcm-builder", default-features = false } +xcm-fee-payment-runtime-api = { path = "../../xcm/xcm-fee-payment-runtime-api", default-features = false } [dev-dependencies] hex-literal = "0.4.1" @@ -227,6 +228,7 @@ std = [ "westend-runtime-constants/std", "xcm-builder/std", "xcm-executor/std", + "xcm-fee-payment-runtime-api/std", "xcm/std", ] runtime-benchmarks = [ diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 12e6174ab6c..02397f35368 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -35,7 +35,7 @@ use frame_support::{ InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, ProcessMessage, ProcessMessageError, WithdrawReasons, }, - weights::{ConstantMultiplier, WeightMeter}, + weights::{ConstantMultiplier, WeightMeter, WeightToFee as _}, PalletId, }; use frame_system::{EnsureRoot, EnsureSigned}; @@ -101,11 +101,13 @@ use sp_std::{collections::btree_map::BTreeMap, prelude::*}; use sp_version::NativeVersion; use sp_version::RuntimeVersion; use xcm::{ - latest::{InteriorLocation, Junction, Junction::PalletInstance}, - VersionedLocation, + latest::prelude::*, IntoVersion, VersionedAssetId, VersionedAssets, VersionedLocation, + VersionedXcm, }; use xcm_builder::PayOverXcm; +use xcm_fee_payment_runtime_api::Error as XcmPaymentApiError; + pub use frame_system::Call as SystemCall; pub use pallet_balances::Call as BalancesCall; pub use pallet_election_provider_multi_phase::{Call as EPMCall, GeometricDepositBase}; @@ -1667,11 +1669,11 @@ pub mod migrations { let now = frame_system::Pallet::::block_number(); let lease = slots::Pallet::::lease(para); if lease.is_empty() { - return None + return None; } // Lease not yet started, ignore: if lease.iter().any(Option::is_none) { - return None + return None; } let (index, _) = as Leaser>::lease_period_index(now)?; @@ -1693,7 +1695,7 @@ pub mod migrations { fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { log::warn!(target: "runtime::session_keys", "Skipping session keys migration pre-upgrade check due to spec version (already applied?)"); - return Ok(Vec::new()) + return Ok(Vec::new()); } log::info!(target: "runtime::session_keys", "Collecting pre-upgrade session keys state"); @@ -1722,7 +1724,7 @@ pub mod migrations { fn on_runtime_upgrade() -> Weight { if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { log::warn!("Skipping session keys upgrade: already applied"); - return ::DbWeight::get().reads(1) + return ::DbWeight::get().reads(1); } log::info!("Upgrading session keys"); Session::upgrade_keys::(transform_session_keys); @@ -1735,7 +1737,7 @@ pub mod migrations { ) -> Result<(), sp_runtime::TryRuntimeError> { if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { log::warn!(target: "runtime::session_keys", "Skipping session keys migration post-upgrade check due to spec version (already applied?)"); - return Ok(()) + return Ok(()); } let key_ids = SessionKeys::key_ids(); @@ -2332,6 +2334,37 @@ sp_api::impl_runtime_apis! 
{ } } + impl xcm_fee_payment_runtime_api::XcmPaymentApi for Runtime { + fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { + if !matches!(xcm_version, 3 | 4) { + return Err(XcmPaymentApiError::UnhandledXcmVersion); + } + Ok([VersionedAssetId::V4(xcm_config::TokenLocation::get().into())] + .into_iter() + .filter_map(|asset| asset.into_version(xcm_version).ok()) + .collect()) + } + + fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { + let local_asset = VersionedAssetId::V4(xcm_config::TokenLocation::get().into()); + let asset = asset + .into_version(4) + .map_err(|_| XcmPaymentApiError::VersionedConversionFailed)?; + + if asset != local_asset { return Err(XcmPaymentApiError::AssetNotFound); } + + Ok(WeightToFee::weight_to_fee(&weight)) + } + + fn query_xcm_weight(message: VersionedXcm<()>) -> Result { + XcmPallet::query_xcm_weight(message) + } + + fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result { + XcmPallet::query_delivery_fees(destination, message) + } + } + impl pallet_nomination_pools_runtime_api::NominationPoolsApi< Block, AccountId, @@ -2650,7 +2683,7 @@ mod remote_tests { #[tokio::test] async fn run_migrations() { if var("RUN_MIGRATION_TESTS").is_err() { - return + return; } sp_tracing::try_init_simple(); diff --git a/polkadot/runtime/westend/src/tests.rs b/polkadot/runtime/westend/src/tests.rs index 9f996316059..bdd599d2b75 100644 --- a/polkadot/runtime/westend/src/tests.rs +++ b/polkadot/runtime/westend/src/tests.rs @@ -21,7 +21,6 @@ use std::collections::HashSet; use crate::*; use frame_support::traits::WhitelistedStorageKeys; use sp_core::hexdisplay::HexDisplay; -use xcm::latest::prelude::*; #[test] fn remove_keys_weight_is_sensible() { diff --git a/polkadot/xcm/pallet-xcm/Cargo.toml b/polkadot/xcm/pallet-xcm/Cargo.toml index 4840b6127f5..08307c34f8a 100644 --- a/polkadot/xcm/pallet-xcm/Cargo.toml +++ b/polkadot/xcm/pallet-xcm/Cargo.toml @@ -26,6 +26,7 @@ sp-std = { path = "../../../substrate/primitives/std", default-features = false xcm = { package = "staging-xcm", path = "..", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../xcm-executor", default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../xcm-builder", default-features = false } +xcm-fee-payment-runtime-api = { path = "../xcm-fee-payment-runtime-api", default-features = false } # marked optional, used in benchmarking frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } @@ -54,6 +55,7 @@ std = [ "sp-std/std", "xcm-builder/std", "xcm-executor/std", + "xcm-fee-payment-runtime-api/std", "xcm/std", ] runtime-benchmarks = [ diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index 8a9e5288f2e..58a597de5ab 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -61,6 +61,7 @@ use xcm_executor::{ }, AssetsInHolding, }; +use xcm_fee_payment_runtime_api::Error as FeePaymentError; #[cfg(any(feature = "try-runtime", test))] use sp_runtime::TryRuntimeError; @@ -2363,6 +2364,37 @@ impl Pallet { AccountIdConversion::::into_account_truncating(&ID) } + pub fn query_xcm_weight(message: VersionedXcm<()>) -> Result { + let message = + Xcm::<()>::try_from(message).map_err(|_| FeePaymentError::VersionedConversionFailed)?; + + T::Weigher::weight(&mut message.into()).map_err(|()| { + log::error!(target: 
"xcm::pallet_xcm::query_xcm_weight", "Error when querying XCM weight"); + FeePaymentError::WeightNotComputable + }) + } + + pub fn query_delivery_fees( + destination: VersionedLocation, + message: VersionedXcm<()>, + ) -> Result { + let result_version = destination.identify_version().max(message.identify_version()); + + let destination = + destination.try_into().map_err(|_| FeePaymentError::VersionedConversionFailed)?; + + let message = message.try_into().map_err(|_| FeePaymentError::VersionedConversionFailed)?; + + let (_, fees) = validate_send::(destination, message).map_err(|error| { + log::error!(target: "xcm::pallet_xcm::query_delivery_fees", "Error when querying delivery fees: {:?}", error); + FeePaymentError::Unroutable + })?; + + VersionedAssets::from(fees) + .into_version(result_version) + .map_err(|_| FeePaymentError::VersionedConversionFailed) + } + /// Create a new expectation of a query response with the querier being here. fn do_new_query( responder: impl Into, diff --git a/polkadot/xcm/src/lib.rs b/polkadot/xcm/src/lib.rs index ba8d726aecf..86a17fa1e88 100644 --- a/polkadot/xcm/src/lib.rs +++ b/polkadot/xcm/src/lib.rs @@ -443,6 +443,16 @@ impl IntoVersion for VersionedXcm { } } +impl IdentifyVersion for VersionedXcm { + fn identify_version(&self) -> Version { + match self { + Self::V2(_) => v2::VERSION, + Self::V3(_) => v3::VERSION, + Self::V4(_) => v4::VERSION, + } + } +} + impl From> for VersionedXcm { fn from(x: v2::Xcm) -> Self { VersionedXcm::V2(x) diff --git a/polkadot/xcm/xcm-fee-payment-runtime-api/Cargo.toml b/polkadot/xcm/xcm-fee-payment-runtime-api/Cargo.toml new file mode 100644 index 00000000000..682642d13c3 --- /dev/null +++ b/polkadot/xcm/xcm-fee-payment-runtime-api/Cargo.toml @@ -0,0 +1,40 @@ +[package] +name = "xcm-fee-payment-runtime-api" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +repository.workspace = true +description = "XCM fee payment runtime API" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ + "derive", +] } + +sp-api = { path = "../../../substrate/primitives/api", default-features = false } +scale-info = { version = "2.10.0", default-features = false, features = [ + "derive", + "serde", +] } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +sp-weights = { path = "../../../substrate/primitives/weights", default-features = false } +xcm = { package = "staging-xcm", path = "../", default-features = false } +frame-support = { path = "../../../substrate/frame/support", default-features = false } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-support/std", + "scale-info/std", + "sp-api/std", + "sp-runtime/std", + "sp-std/std", + "sp-weights/std", + "xcm/std", +] diff --git a/polkadot/xcm/xcm-fee-payment-runtime-api/src/lib.rs b/polkadot/xcm/xcm-fee-payment-runtime-api/src/lib.rs new file mode 100644 index 00000000000..20bf9236f1f --- /dev/null +++ b/polkadot/xcm/xcm-fee-payment-runtime-api/src/lib.rs @@ -0,0 +1,99 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+
+// Substrate is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Substrate is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <https://www.gnu.org/licenses/>.
+
+//! Runtime API definition for xcm transaction payment.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+use codec::{Decode, Encode};
+use frame_support::pallet_prelude::TypeInfo;
+use sp_std::vec::Vec;
+use sp_weights::Weight;
+use xcm::{Version, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm};
+
+sp_api::decl_runtime_apis! {
+	/// A trait of XCM payment API.
+	///
+	/// API provides functionality for obtaining:
+	///
+	/// * the weight required to execute an XCM message,
+	/// * a list of acceptable `AssetId`s for message execution payment,
+	/// * the cost of the weight in the specified acceptable `AssetId`.
+	/// * the fees for an XCM message delivery.
+	///
+	/// To determine the execution weight of the calls required for
+	/// [`xcm::latest::Instruction::Transact`] instruction, `TransactionPaymentCallApi` can be used.
+	pub trait XcmPaymentApi {
+		/// Returns a list of acceptable payment assets.
+		///
+		/// # Arguments
+		///
+		/// * `xcm_version`: Version.
+		fn query_acceptable_payment_assets(xcm_version: Version) -> Result<Vec<VersionedAssetId>, Error>;
+
+		/// Returns a weight needed to execute a XCM.
+		///
+		/// # Arguments
+		///
+		/// * `message`: `VersionedXcm`.
+		fn query_xcm_weight(message: VersionedXcm<()>) -> Result<Weight, Error>;
+
+		/// Converts a weight into a fee for the specified `AssetId`.
+		///
+		/// # Arguments
+		///
+		/// * `weight`: convertible `Weight`.
+		/// * `asset`: `VersionedAssetId`.
+		fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result<u128, Error>;
+
+		/// Get delivery fees for sending a specific `message` to a `destination`.
+		/// These always come in a specific asset, defined by the chain.
+		///
+		/// # Arguments
+		/// * `message`: The message that'll be sent, necessary because most delivery fees are based on the
+		///   size of the message.
+		/// * `destination`: The destination to send the message to. Different destinations may use
+		///   different senders that charge different fees.
+		fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result<VersionedAssets, Error>;
+	}
+}
+
+#[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, Debug, TypeInfo)]
+pub enum Error {
+	/// An API part is unsupported.
+	#[codec(index = 0)]
+	Unimplemented,
+
+	/// Converting a versioned data structure from one version to another failed.
+	#[codec(index = 1)]
+	VersionedConversionFailed,
+
+	/// XCM message weight calculation failed.
+	#[codec(index = 2)]
+	WeightNotComputable,
+
+	/// XCM version not able to be handled.
+	#[codec(index = 3)]
+	UnhandledXcmVersion,
+
+	/// The given asset is not handled as a fee asset.
+	#[codec(index = 4)]
+	AssetNotFound,
+
+	/// Destination is known to be unroutable.
+	#[codec(index = 5)]
+	Unroutable,
+}
diff --git a/prdoc/pr_3607.prdoc b/prdoc/pr_3607.prdoc
new file mode 100644
index 00000000000..1a69b25ad25
--- /dev/null
+++ b/prdoc/pr_3607.prdoc
@@ -0,0 +1,26 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: "XCM fee payment API"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      A runtime API was added for estimating the fees required for XCM execution and delivery.
+      This is the basic building block needed for UIs to accurately estimate fees.
+      An example implementation is shown in the PR. Ideally it's simple to implement: you only need to call existing parts of your XCM config.
+      The API looks like so:
+      ```rust
+      fn query_acceptable_payment_assets(xcm_version: Version) -> Result<Vec<VersionedAssetId>, Error>;
+      fn query_xcm_weight(message: VersionedXcm<Call>) -> Result<Weight, Error>;
+      fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result<u128, Error>;
+      fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result<VersionedAssets, Error>;
+      ```
+      The first three relate to XCM execution fees: given an XCM, you can query its weight, then query which assets are acceptable for buying weight, and finally convert the weight into an amount of one of those assets.
+      The last one takes in a destination and a message you want to send from the runtime you're executing this on; it will give you the delivery fees.
+
+crates:
+  - name: xcm-fee-payment-runtime-api
+  - name: rococo-runtime
+  - name: westend-runtime
+
-- GitLab
From ed907819c64fd0a185ff4109dfc7512d6162f7e9 Mon Sep 17 00:00:00 2001
From: Derek Colley
Date: Tue, 26 Mar 2024 22:31:15 +0000
Subject: [PATCH 036/128] add metaspan boot node to coretime-westend.json (#3781)

---
 cumulus/parachains/chain-specs/coretime-westend.json | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/cumulus/parachains/chain-specs/coretime-westend.json b/cumulus/parachains/chain-specs/coretime-westend.json
index c79fd582348..adb35b8a349 100644
--- a/cumulus/parachains/chain-specs/coretime-westend.json
+++ b/cumulus/parachains/chain-specs/coretime-westend.json
@@ -4,7 +4,8 @@
     "chainType": "Live",
     "bootNodes": [
         "/dns/westend-coretime-collator-node-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWP93Dzk8T7GWxyWw9jhLcz8Pksokk3R9vL2eEH337bNkT",
-        "/dns/westend-coretime-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWMh2imeAzsZKGQgm2cv6Uoep3GBYtwGfujt1bs5YfVzkH"
+        "/dns/westend-coretime-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWMh2imeAzsZKGQgm2cv6Uoep3GBYtwGfujt1bs5YfVzkH",
+        "/dns/boot.metaspan.io/tcp/33019/p2p/12D3KooWCa1uNnEZqiqJY9jkKNQxwSLGPeZ5MjWHhjQMGwga9JMM"
     ],
     "telemetryEndpoints": null,
     "protocolId": null,
-- GitLab
From 3fc5b826534438313d03f6e0404db44099be6d9d Mon Sep 17 00:00:00 2001
From: ordian
Date: Tue, 26 Mar 2024 23:59:47 +0100
Subject: [PATCH 037/128] fix regression in approval-voting introduced in #3747 (#3831)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Fixes #3826. The docs on the `candidates` field of `BlockEntry` incorrectly stated that they are sorted by core index. The (incorrect) optimization was introduced in #3747 based on this assumption. The actual ordering is based on the ordering of `CandidateIncluded` events in the runtime. We revert this optimization here.
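To make the regression concrete, a standalone illustration (ours, not from the patch) of why `binary_search_by_key` misbehaves on event-ordered candidates while a linear `position` scan stays correct:

```rust
fn main() {
	// (core_index, candidate) pairs in `CandidateIncluded` event order,
	// which need not be ascending by core index.
	let candidates = [(2u32, "a"), (1u32, "b"), (0u32, "c")];

	// Linear scan: correct regardless of ordering.
	assert_eq!(candidates.iter().position(|&(core, _)| core == 0), Some(2));

	// Binary search assumes ascending keys; on this input it reports the
	// entry for core 0 as missing even though it is present.
	assert!(candidates.binary_search_by_key(&0u32, |&(core, _)| core).is_err());
}
```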
- [x] verify the underlying issue - [x] add a regression test --------- Co-authored-by: Bastian Köcher --- polkadot/node/core/approval-voting/src/lib.rs | 6 +- .../approval-voting/src/persisted_entries.rs | 2 +- .../node/core/approval-voting/src/tests.rs | 167 ++++++++++++++++++ 3 files changed, 171 insertions(+), 4 deletions(-) diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs index 1a62c9ee55e..76b3d476e28 100644 --- a/polkadot/node/core/approval-voting/src/lib.rs +++ b/polkadot/node/core/approval-voting/src/lib.rs @@ -1285,10 +1285,10 @@ fn cores_to_candidate_indices( // Map from core index to candidate index. for claimed_core_index in core_indices.iter_ones() { - // Candidates are sorted by core index. - if let Ok(candidate_index) = block_entry + if let Some(candidate_index) = block_entry .candidates() - .binary_search_by_key(&(claimed_core_index as u32), |(core_index, _)| core_index.0) + .iter() + .position(|(core_index, _)| core_index.0 == claimed_core_index as u32) { candidate_indices.push(candidate_index as _); } diff --git a/polkadot/node/core/approval-voting/src/persisted_entries.rs b/polkadot/node/core/approval-voting/src/persisted_entries.rs index b924a1b52cc..6eeb99cb99f 100644 --- a/polkadot/node/core/approval-voting/src/persisted_entries.rs +++ b/polkadot/node/core/approval-voting/src/persisted_entries.rs @@ -454,7 +454,7 @@ pub struct BlockEntry { slot: Slot, relay_vrf_story: RelayVRFStory, // The candidates included as-of this block and the index of the core they are - // leaving. Sorted ascending by core index. + // leaving. candidates: Vec<(CoreIndex, CandidateHash)>, // A bitfield where the i'th bit corresponds to the i'th candidate in `candidates`. // The i'th bit is `true` iff the candidate has been approved in the context of this diff --git a/polkadot/node/core/approval-voting/src/tests.rs b/polkadot/node/core/approval-voting/src/tests.rs index a3013eab46d..1483af56585 100644 --- a/polkadot/node/core/approval-voting/src/tests.rs +++ b/polkadot/node/core/approval-voting/src/tests.rs @@ -2479,6 +2479,173 @@ fn subsystem_import_checked_approval_sets_one_block_bit_at_a_time() { }); } +// See https://github.com/paritytech/polkadot-sdk/issues/3826 +#[test] +fn inclusion_events_can_be_unordered_by_core_index() { + let assignment_criteria = Box::new(MockAssignmentCriteria( + || { + let mut assignments = HashMap::new(); + for core in 0..3 { + let _ = assignments.insert( + CoreIndex(core), + approval_db::v2::OurAssignment { + cert: garbage_assignment_cert_v2( + AssignmentCertKindV2::RelayVRFModuloCompact { + core_bitfield: vec![CoreIndex(0), CoreIndex(1), CoreIndex(2)] + .try_into() + .unwrap(), + }, + ), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + } + assignments + }, + |_| Ok(0), + )); + let config = HarnessConfigBuilder::default().assignment_criteria(assignment_criteria).build(); + let store = config.backend(); + + test_harness(config, |test_harness| async move { + let TestHarness { + mut virtual_overseer, + clock, + sync_oracle_handle: _sync_oracle_handle, + .. 
+ } = test_harness; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(rx)) => { + rx.send(Ok(0)).unwrap(); + } + ); + + let block_hash = Hash::repeat_byte(0x01); + + let candidate_receipt0 = { + let mut receipt = dummy_candidate_receipt(block_hash); + receipt.descriptor.para_id = ParaId::from(0_u32); + receipt + }; + let candidate_receipt1 = { + let mut receipt = dummy_candidate_receipt(block_hash); + receipt.descriptor.para_id = ParaId::from(1_u32); + receipt + }; + let candidate_receipt2 = { + let mut receipt = dummy_candidate_receipt(block_hash); + receipt.descriptor.para_id = ParaId::from(2_u32); + receipt + }; + let candidate_index0 = 0; + let candidate_index1 = 1; + let candidate_index2 = 2; + + let validator0 = ValidatorIndex(0); + let validator1 = ValidatorIndex(1); + let validator2 = ValidatorIndex(2); + let validator3 = ValidatorIndex(3); + + let validators = vec![ + Sr25519Keyring::Alice, + Sr25519Keyring::Bob, + Sr25519Keyring::Charlie, + Sr25519Keyring::Dave, + Sr25519Keyring::Eve, + ]; + let session_info = SessionInfo { + validator_groups: IndexedVec::>::from(vec![ + vec![validator0, validator1], + vec![validator2], + vec![validator3], + ]), + needed_approvals: 1, + zeroth_delay_tranche_width: 1, + relay_vrf_modulo_samples: 1, + n_delay_tranches: 1, + no_show_slots: 1, + ..session_info(&validators) + }; + + ChainBuilder::new() + .add_block( + block_hash, + ChainBuilder::GENESIS_HASH, + 1, + BlockConfig { + slot: Slot::from(0), + candidates: Some(vec![ + (candidate_receipt0.clone(), CoreIndex(2), GroupIndex(2)), + (candidate_receipt1.clone(), CoreIndex(1), GroupIndex(0)), + (candidate_receipt2.clone(), CoreIndex(0), GroupIndex(1)), + ]), + session_info: Some(session_info), + end_syncing: true, + }, + ) + .build(&mut virtual_overseer) + .await; + + assert_eq!(clock.inner.lock().next_wakeup().unwrap(), 2); + clock.inner.lock().wakeup_all(100); + + assert_eq!(clock.inner.lock().wakeups.len(), 0); + + futures_timer::Delay::new(Duration::from_millis(100)).await; + + // Assignment is distributed only once from `approval-voting` + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + c_indices, + )) => { + assert_eq!(c_indices, vec![candidate_index0, candidate_index1, candidate_index2].try_into().unwrap()); + } + ); + + // Candidate 0 + recover_available_data(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + // Candidate 1 + recover_available_data(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + // Candidate 2 + recover_available_data(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + // Check if assignment was triggered for candidate 0. + let candidate_entry = + store.load_candidate_entry(&candidate_receipt0.hash()).unwrap().unwrap(); + let our_assignment = + candidate_entry.approval_entry(&block_hash).unwrap().our_assignment().unwrap(); + assert!(our_assignment.triggered()); + + // Check if assignment was triggered for candidate 1. + let candidate_entry = + store.load_candidate_entry(&candidate_receipt1.hash()).unwrap().unwrap(); + let our_assignment = + candidate_entry.approval_entry(&block_hash).unwrap().our_assignment().unwrap(); + assert!(our_assignment.triggered()); + + // Check if assignment was triggered for candidate 2. 
+ let candidate_entry = + store.load_candidate_entry(&candidate_receipt2.hash()).unwrap().unwrap(); + let our_assignment = + candidate_entry.approval_entry(&block_hash).unwrap().our_assignment().unwrap(); + assert!(our_assignment.triggered()); + + virtual_overseer + }); +} + fn approved_ancestor_test( skip_approval: impl Fn(BlockNumber) -> bool, approved_height: BlockNumber, -- GitLab From f394477988ac8b610862565c31dc7d3a96d73122 Mon Sep 17 00:00:00 2001 From: Javier Viola <363911+pepoviola@users.noreply.github.com> Date: Wed, 27 Mar 2024 03:45:29 +0100 Subject: [PATCH 038/128] chore: bump zombienet version (#3830) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit bump version to `1.3.97` (follow up from https://github.com/paritytech/polkadot-sdk/pull/3805) --------- Co-authored-by: Bastian Köcher --- .gitlab/pipeline/zombienet.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab/pipeline/zombienet.yml b/.gitlab/pipeline/zombienet.yml index 8d308714fab..82341eb709f 100644 --- a/.gitlab/pipeline/zombienet.yml +++ b/.gitlab/pipeline/zombienet.yml @@ -1,7 +1,7 @@ .zombienet-refs: extends: .build-refs variables: - ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.95" + ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.98" include: # substrate tests -- GitLab From 66051adb619d2119771920218e2de75fa037d7e8 Mon Sep 17 00:00:00 2001 From: Andrei Sandu <54316454+sandreim@users.noreply.github.com> Date: Wed, 27 Mar 2024 06:16:03 +0200 Subject: [PATCH 039/128] testnet genesis: enable approval voting v2 assignments and coalescing (#3827) This is a long due chore ... --------- Signed-off-by: Andrei Sandu Co-authored-by: ordian --- polkadot/node/service/src/chain_spec.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/polkadot/node/service/src/chain_spec.rs b/polkadot/node/service/src/chain_spec.rs index 1c44b17b6fd..c03ce1db094 100644 --- a/polkadot/node/service/src/chain_spec.rs +++ b/polkadot/node/service/src/chain_spec.rs @@ -123,7 +123,8 @@ fn default_parachains_host_configuration( ) -> polkadot_runtime_parachains::configuration::HostConfiguration { use polkadot_primitives::{ - vstaging::node_features::FeatureIndex, AsyncBackingParams, MAX_CODE_SIZE, MAX_POV_SIZE, + vstaging::{node_features::FeatureIndex, ApprovalVotingParams}, + AsyncBackingParams, MAX_CODE_SIZE, MAX_POV_SIZE, }; polkadot_runtime_parachains::configuration::HostConfiguration { @@ -158,7 +159,8 @@ fn default_parachains_host_configuration( allowed_ancestry_len: 2, }, node_features: bitvec::vec::BitVec::from_element( - 1u8 << (FeatureIndex::ElasticScalingMVP as usize), + 1u8 << (FeatureIndex::ElasticScalingMVP as usize) | + 1u8 << (FeatureIndex::EnableAssignmentsV2 as usize), ), scheduler_params: SchedulerParams { lookahead: 2, @@ -166,6 +168,7 @@ fn default_parachains_host_configuration( paras_availability_period: 4, ..Default::default() }, + approval_voting_params: ApprovalVotingParams { max_approval_coalesce_count: 5 }, ..Default::default() } } -- GitLab From feee773d15d5237765b520b03854d46652181de5 Mon Sep 17 00:00:00 2001 From: Francisco Aguirre Date: Wed, 27 Mar 2024 09:31:01 +0100 Subject: [PATCH 040/128] pallet-xcm: Deprecate `execute` and `send` in favor of `execute_blob` and `send_blob` (#3749) `execute` and `send` try to decode the xcm in the parameters before reaching the filter line. The new extrinsics decode only after the filter line. These should be used instead of the old ones. 
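Before the diff, a small migration sketch (ours; the `send_clear_origin` helper and its `expect` message are illustrative, not part of the patch) of what moving a call site from `send` to `send_blob` looks like:

```rust
use codec::Encode;
use frame_support::pallet_prelude::DispatchResult;
use frame_system::pallet_prelude::OriginFor;
use xcm::{latest::prelude::*, VersionedLocation, VersionedXcm};

fn send_clear_origin<T: pallet_xcm::Config>(
	origin: OriginFor<T>,
	destination: VersionedLocation,
) -> DispatchResult {
	let message = VersionedXcm::from(Xcm::<()>(vec![ClearOrigin]));
	// Old (now deprecated): the message was decoded from the boxed parameter
	// before the pallet's filter ran.
	// pallet_xcm::Pallet::<T>::send(origin, Box::new(destination), Box::new(message))
	//
	// New: pass the SCALE-encoded bytes as a bounded blob; decoding happens
	// only after the filter line.
	pallet_xcm::Pallet::<T>::send_blob(
		origin,
		Box::new(destination),
		message.encode().try_into().expect("message fits the encoded-size bound"),
	)
}
```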
## TODO - [x] Tests - [x] Generate weights - [x] Deprecation issue -> https://github.com/paritytech/polkadot-sdk/issues/3771 - [x] PRDoc - [x] Handle error in pallet-contracts This would make writing XCMs in PJS Apps more difficult, but here's the fix for that: https://github.com/polkadot-js/apps/pull/10350. Already deployed! https://polkadot.js.org/apps/#/utilities/xcm Supersedes https://github.com/paritytech/polkadot-sdk/pull/1798/ --------- Co-authored-by: PG Herveou Co-authored-by: command-bot <> Co-authored-by: Adrian Catangiu --- Cargo.lock | 1 + .../emulated/chains/relays/westend/Cargo.toml | 1 - .../emulated/common/src/impls.rs | 6 +- .../assets/asset-hub-rococo/src/tests/send.rs | 8 +- .../assets/asset-hub-rococo/src/tests/swap.rs | 4 +- .../asset-hub-westend/src/tests/send.rs | 8 +- .../asset-hub-westend/src/tests/swap.rs | 4 +- .../bridge-hub-rococo/src/tests/send_xcm.rs | 7 +- .../bridge-hub-rococo/src/tests/snowbridge.rs | 18 +- .../bridges/bridge-hub-westend/Cargo.toml | 1 + .../bridge-hub-westend/src/tests/send_xcm.rs | 7 +- .../src/weights/pallet_xcm.rs | 112 ++++++---- .../src/weights/pallet_xcm.rs | 114 ++++++---- .../src/weights/pallet_xcm.rs | 108 ++++++---- .../src/weights/pallet_xcm.rs | 108 ++++++---- .../src/weights/pallet_xcm.rs | 106 +++++---- .../coretime-rococo/src/weights/pallet_xcm.rs | 102 +++++---- .../src/weights/pallet_xcm.rs | 102 +++++---- .../people-rococo/src/weights/pallet_xcm.rs | 102 +++++---- .../people-westend/src/weights/pallet_xcm.rs | 102 +++++---- polkadot/runtime/rococo/src/impls.rs | 9 +- .../runtime/rococo/src/weights/pallet_xcm.rs | 110 ++++++---- polkadot/runtime/westend/src/impls.rs | 9 +- .../runtime/westend/src/weights/pallet_xcm.rs | 108 ++++++---- polkadot/xcm/pallet-xcm/src/benchmarking.rs | 29 +++ polkadot/xcm/pallet-xcm/src/lib.rs | 202 +++++++++++++----- polkadot/xcm/pallet-xcm/src/tests/mod.rs | 77 ++++--- polkadot/xcm/src/lib.rs | 3 + polkadot/xcm/src/v4/mod.rs | 20 +- polkadot/xcm/xcm-builder/src/controller.rs | 39 ++-- polkadot/xcm/xcm-builder/src/lib.rs | 2 +- prdoc/pr_3749.prdoc | 47 ++++ .../frame/contracts/mock-network/src/tests.rs | 40 +--- substrate/frame/contracts/src/lib.rs | 3 + substrate/frame/contracts/src/wasm/runtime.rs | 54 +---- substrate/frame/contracts/uapi/src/host.rs | 2 +- 36 files changed, 1133 insertions(+), 642 deletions(-) create mode 100644 prdoc/pr_3749.prdoc diff --git a/Cargo.lock b/Cargo.lock index f246a978fd2..2382dc8d162 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2148,6 +2148,7 @@ dependencies = [ "pallet-message-queue", "pallet-xcm", "parachains-common", + "parity-scale-codec", "rococo-westend-system-emulated-network", "sp-runtime", "staging-xcm", diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml index 20aedb50e6a..12a3ad60e0e 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml @@ -11,7 +11,6 @@ publish = false workspace = true [dependencies] - # Substrate sp-core = { path = "../../../../../../../substrate/primitives/core", default-features = false } sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs index ae69bf991e5..618c3addc5d 100644 --- 
a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs @@ -362,7 +362,7 @@ macro_rules! impl_send_transact_helpers_for_relay_chain { recipient: $crate::impls::ParaId, call: $crate::impls::DoubleEncoded<()> ) { - use $crate::impls::{bx, Chain, RelayChain}; + use $crate::impls::{bx, Chain, RelayChain, Encode}; ::execute_with(|| { let root_origin = ::RuntimeOrigin::root(); @@ -370,10 +370,10 @@ macro_rules! impl_send_transact_helpers_for_relay_chain { let xcm = $crate::impls::xcm_transact_unpaid_execution(call, $crate::impls::OriginKind::Superuser); // Send XCM `Transact` - $crate::impls::assert_ok!(]>::XcmPallet::send( + $crate::impls::assert_ok!(]>::XcmPallet::send_blob( root_origin, bx!(destination.into()), - bx!(xcm), + xcm.encode().try_into().unwrap(), )); Self::assert_xcm_pallet_sent(); }); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs index 364fbd0d439..1d120f1dc4c 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs @@ -75,10 +75,10 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_system_assets_works() { )]); PenpalA::execute_with(|| { - assert_ok!(::PolkadotXcm::send( + assert_ok!(::PolkadotXcm::send_blob( root_origin, bx!(system_para_destination), - bx!(xcm), + xcm.encode().try_into().unwrap(), )); PenpalA::assert_xcm_pallet_sent(); @@ -159,10 +159,10 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() { )]); PenpalA::execute_with(|| { - assert_ok!(::PolkadotXcm::send( + assert_ok!(::PolkadotXcm::send_blob( root_origin, bx!(system_para_destination), - bx!(xcm), + xcm.encode().try_into().unwrap(), )); PenpalA::assert_xcm_pallet_sent(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs index 87f0b3d9f90..e13300b7c11 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs @@ -370,10 +370,10 @@ fn pay_xcm_fee_with_some_asset_swapped_for_native() { penpal.clone(), ); - assert_ok!(::PolkadotXcm::send( + assert_ok!(::PolkadotXcm::send_blob( penpal_root, bx!(asset_hub_location), - bx!(xcm), + xcm.encode().try_into().unwrap(), )); PenpalA::assert_xcm_pallet_sent(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs index eb0e985cc0c..f218b539c38 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs @@ -75,10 +75,10 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_system_assets_works() { )]); PenpalA::execute_with(|| { - assert_ok!(::PolkadotXcm::send( + assert_ok!(::PolkadotXcm::send_blob( root_origin, bx!(system_para_destination), - bx!(xcm), + xcm.encode().try_into().unwrap(), )); PenpalA::assert_xcm_pallet_sent(); @@ -159,10 +159,10 @@ fn 
send_xcm_from_para_to_system_para_paying_fee_with_assets_works() { )]); PenpalA::execute_with(|| { - assert_ok!(::PolkadotXcm::send( + assert_ok!(::PolkadotXcm::send_blob( root_origin, bx!(system_para_destination), - bx!(xcm), + xcm.encode().try_into().unwrap(), )); PenpalA::assert_xcm_pallet_sent(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs index 04740d31158..aa673c03483 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs @@ -369,10 +369,10 @@ fn pay_xcm_fee_with_some_asset_swapped_for_native() { penpal.clone(), ); - assert_ok!(::PolkadotXcm::send( + assert_ok!(::PolkadotXcm::send_blob( penpal_root, bx!(asset_hub_location), - bx!(xcm), + xcm.encode().try_into().unwrap(), )); PenpalA::assert_xcm_pallet_sent(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs index a1d871cdb61..4bd041dc03f 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs @@ -14,6 +14,7 @@ // limitations under the License. use crate::tests::*; +use codec::Encode; #[test] fn send_xcm_from_rococo_relay_to_westend_asset_hub_should_fail_on_not_applicable() { @@ -26,7 +27,7 @@ fn send_xcm_from_rococo_relay_to_westend_asset_hub_should_fail_on_not_applicable let remote_xcm = Xcm(vec![ClearOrigin]); - let xcm = VersionedXcm::from(Xcm(vec![ + let xcm = VersionedXcm::from(Xcm::<()>(vec![ UnpaidExecution { weight_limit, check_origin }, ExportMessage { network: WestendId.into(), @@ -38,10 +39,10 @@ fn send_xcm_from_rococo_relay_to_westend_asset_hub_should_fail_on_not_applicable // Rococo Global Consensus // Send XCM message from Relay Chain to Bridge Hub source Parachain Rococo::execute_with(|| { - assert_ok!(::XcmPallet::send( + assert_ok!(::XcmPallet::send_blob( sudo_origin, bx!(destination), - bx!(xcm), + xcm.encode().try_into().unwrap(), )); type RuntimeEvent = ::RuntimeEvent; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs index 26b82375e07..caaf24e00a8 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs @@ -83,7 +83,7 @@ fn create_agent() { let create_agent_call = SnowbridgeControl::Control(ControlCall::CreateAgent {}); // Construct XCM to create an agent for para 1001 - let remote_xcm = VersionedXcm::from(Xcm(vec![ + let remote_xcm = VersionedXcm::from(Xcm::<()>(vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, DescendOrigin(Parachain(origin_para).into()), Transact { @@ -96,10 +96,10 @@ fn create_agent() { // Rococo Global Consensus // Send XCM message from Relay Chain to Bridge Hub source Parachain Rococo::execute_with(|| { - assert_ok!(::XcmPallet::send( + assert_ok!(::XcmPallet::send_blob( sudo_origin, bx!(destination), - 
bx!(remote_xcm), + remote_xcm.encode().try_into().unwrap(), )); type RuntimeEvent = ::RuntimeEvent; @@ -141,7 +141,7 @@ fn create_channel() { let create_agent_call = SnowbridgeControl::Control(ControlCall::CreateAgent {}); // Construct XCM to create an agent for para 1001 - let create_agent_xcm = VersionedXcm::from(Xcm(vec![ + let create_agent_xcm = VersionedXcm::from(Xcm::<()>(vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, DescendOrigin(Parachain(origin_para).into()), Transact { @@ -154,7 +154,7 @@ fn create_channel() { let create_channel_call = SnowbridgeControl::Control(ControlCall::CreateChannel { mode: OperatingMode::Normal }); // Construct XCM to create a channel for para 1001 - let create_channel_xcm = VersionedXcm::from(Xcm(vec![ + let create_channel_xcm = VersionedXcm::from(Xcm::<()>(vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, DescendOrigin(Parachain(origin_para).into()), Transact { @@ -167,16 +167,16 @@ fn create_channel() { // Rococo Global Consensus // Send XCM message from Relay Chain to Bridge Hub source Parachain Rococo::execute_with(|| { - assert_ok!(::XcmPallet::send( + assert_ok!(::XcmPallet::send_blob( sudo_origin.clone(), bx!(destination.clone()), - bx!(create_agent_xcm), + create_agent_xcm.encode().try_into().unwrap(), )); - assert_ok!(::XcmPallet::send( + assert_ok!(::XcmPallet::send_blob( sudo_origin, bx!(destination), - bx!(create_channel_xcm), + create_channel_xcm.encode().try_into().unwrap(), )); type RuntimeEvent = ::RuntimeEvent; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml index 9059d841a48..9c45a7adeb4 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml @@ -11,6 +11,7 @@ publish = false workspace = true [dependencies] +codec = { package = "parity-scale-codec", version = "3.6.0" } # Substrate frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs index b01be5e8dc8..f69747c1770 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs @@ -14,6 +14,7 @@ // limitations under the License. 
use crate::tests::*; +use codec::Encode; #[test] fn send_xcm_from_westend_relay_to_rococo_asset_hub_should_fail_on_not_applicable() { @@ -26,7 +27,7 @@ fn send_xcm_from_westend_relay_to_rococo_asset_hub_should_fail_on_not_applicable let remote_xcm = Xcm(vec![ClearOrigin]); - let xcm = VersionedXcm::from(Xcm(vec![ + let xcm = VersionedXcm::from(Xcm::<()>(vec![ UnpaidExecution { weight_limit, check_origin }, ExportMessage { network: RococoId, @@ -38,10 +39,10 @@ fn send_xcm_from_westend_relay_to_rococo_asset_hub_should_fail_on_not_applicable // Westend Global Consensus // Send XCM message from Relay Chain to Bridge Hub source Parachain Westend::execute_with(|| { - assert_ok!(::XcmPallet::send( + assert_ok!(::XcmPallet::send_blob( sudo_origin, bx!(destination), - bx!(xcm), + xcm.encode().try_into().unwrap(), )); type RuntimeEvent = ::RuntimeEvent; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs index 51b6543bae8..e0e231d7da2 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,8 +64,30 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 22_136_000 picoseconds. - Weight::from_parts(22_518_000, 0) + // Minimum execution time: 21_224_000 picoseconds. + Weight::from_parts(21_821_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn send_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 21_474_000 picoseconds. 
+ Weight::from_parts(22_072_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -90,8 +112,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 92_277_000 picoseconds. - Weight::from_parts(94_843_000, 0) + // Minimum execution time: 90_677_000 picoseconds. + Weight::from_parts(93_658_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -118,8 +140,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `400` // Estimated: `6196` - // Minimum execution time: 120_110_000 picoseconds. - Weight::from_parts(122_968_000, 0) + // Minimum execution time: 116_767_000 picoseconds. + Weight::from_parts(118_843_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(5)) @@ -148,8 +170,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `496` // Estimated: `6208` - // Minimum execution time: 143_116_000 picoseconds. - Weight::from_parts(147_355_000, 0) + // Minimum execution time: 137_983_000 picoseconds. + Weight::from_parts(141_396_000, 0) .saturating_add(Weight::from_parts(0, 6208)) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(7)) @@ -164,14 +186,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn execute_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_517_000 picoseconds. - Weight::from_parts(6_756_000, 0) + // Minimum execution time: 6_232_000 picoseconds. + Weight::from_parts(6_507_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -181,8 +213,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_894_000 picoseconds. - Weight::from_parts(2_024_000, 0) + // Minimum execution time: 1_884_000 picoseconds. + Weight::from_parts(2_016_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -208,8 +240,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 27_314_000 picoseconds. - Weight::from_parts(28_787_000, 0) + // Minimum execution time: 26_637_000 picoseconds. 
+ Weight::from_parts(27_616_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -234,8 +266,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `363` // Estimated: `3828` - // Minimum execution time: 29_840_000 picoseconds. - Weight::from_parts(30_589_000, 0) + // Minimum execution time: 28_668_000 picoseconds. + Weight::from_parts(29_413_000, 0) .saturating_add(Weight::from_parts(0, 3828)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -246,8 +278,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_893_000 picoseconds. - Weight::from_parts(2_017_000, 0) + // Minimum execution time: 1_990_000 picoseconds. + Weight::from_parts(2_114_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -257,8 +289,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `159` // Estimated: `13524` - // Minimum execution time: 19_211_000 picoseconds. - Weight::from_parts(19_552_000, 0) + // Minimum execution time: 18_856_000 picoseconds. + Weight::from_parts(19_430_000, 0) .saturating_add(Weight::from_parts(0, 13524)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -269,8 +301,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `163` // Estimated: `13528` - // Minimum execution time: 19_177_000 picoseconds. - Weight::from_parts(19_704_000, 0) + // Minimum execution time: 19_068_000 picoseconds. + Weight::from_parts(19_434_000, 0) .saturating_add(Weight::from_parts(0, 13528)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -281,8 +313,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `173` // Estimated: `16013` - // Minimum execution time: 20_449_000 picoseconds. - Weight::from_parts(21_075_000, 0) + // Minimum execution time: 21_055_000 picoseconds. + Weight::from_parts(21_379_000, 0) .saturating_add(Weight::from_parts(0, 16013)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -304,8 +336,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `6152` - // Minimum execution time: 26_578_000 picoseconds. - Weight::from_parts(27_545_000, 0) + // Minimum execution time: 25_736_000 picoseconds. + Weight::from_parts(26_423_000, 0) .saturating_add(Weight::from_parts(0, 6152)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -316,8 +348,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `206` // Estimated: `11096` - // Minimum execution time: 11_646_000 picoseconds. - Weight::from_parts(11_944_000, 0) + // Minimum execution time: 11_853_000 picoseconds. + Weight::from_parts(12_215_000, 0) .saturating_add(Weight::from_parts(0, 11096)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -327,8 +359,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `170` // Estimated: `13535` - // Minimum execution time: 19_301_000 picoseconds. - Weight::from_parts(19_664_000, 0) + // Minimum execution time: 19_418_000 picoseconds. 
+ Weight::from_parts(19_794_000, 0)
 .saturating_add(Weight::from_parts(0, 13535))
 .saturating_add(T::DbWeight::get().reads(5))
 .saturating_add(T::DbWeight::get().writes(2))
@@ -351,8 +383,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `212`
 // Estimated: `13577`
- // Minimum execution time: 35_715_000 picoseconds.
- Weight::from_parts(36_915_000, 0)
+ // Minimum execution time: 34_719_000 picoseconds.
+ Weight::from_parts(35_260_000, 0)
 .saturating_add(Weight::from_parts(0, 13577))
 .saturating_add(T::DbWeight::get().reads(11))
 .saturating_add(T::DbWeight::get().writes(4))
@@ -365,8 +397,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `103`
 // Estimated: `1588`
- // Minimum execution time: 4_871_000 picoseconds.
- Weight::from_parts(5_066_000, 0)
+ // Minimum execution time: 4_937_000 picoseconds.
+ Weight::from_parts(5_203_000, 0)
 .saturating_add(Weight::from_parts(0, 1588))
 .saturating_add(T::DbWeight::get().reads(1))
 .saturating_add(T::DbWeight::get().writes(2))
@@ -377,8 +409,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `7740`
 // Estimated: `11205`
- // Minimum execution time: 25_150_000 picoseconds.
- Weight::from_parts(26_119_000, 0)
+ // Minimum execution time: 26_064_000 picoseconds.
+ Weight::from_parts(26_497_000, 0)
 .saturating_add(Weight::from_parts(0, 11205))
 .saturating_add(T::DbWeight::get().reads(1))
 .saturating_add(T::DbWeight::get().writes(1))
@@ -389,8 +421,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `160`
 // Estimated: `3625`
- // Minimum execution time: 38_248_000 picoseconds.
- Weight::from_parts(39_122_000, 0)
+ // Minimum execution time: 37_132_000 picoseconds.
+ Weight::from_parts(37_868_000, 0)
 .saturating_add(Weight::from_parts(0, 3625))
 .saturating_add(T::DbWeight::get().reads(1))
 .saturating_add(T::DbWeight::get().writes(1))
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs
index 71facff87d3..299e4b8b3cd 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs
@@ -17,9 +17,9 @@
 //! Autogenerated weights for `pallet_xcm`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024
 // Executed Command:
@@ -64,8 +64,30 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `145`
 // Estimated: `3610`
- // Minimum execution time: 21_630_000 picoseconds.
- Weight::from_parts(22_306_000, 0)
+ // Minimum execution time: 21_722_000 picoseconds.
+ Weight::from_parts(22_253_000, 0)
+ .saturating_add(Weight::from_parts(0, 3610))
+ .saturating_add(T::DbWeight::get().reads(6))
+ .saturating_add(T::DbWeight::get().writes(2))
+ }
+ /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0)
+ /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+ /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+ /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+ /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+ /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+ /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ fn send_blob() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `145`
+ // Estimated: `3610`
+ // Minimum execution time: 21_694_000 picoseconds.
+ Weight::from_parts(22_326_000, 0)
 .saturating_add(Weight::from_parts(0, 3610))
 .saturating_add(T::DbWeight::get().reads(6))
 .saturating_add(T::DbWeight::get().writes(2))
@@ -90,8 +112,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `145`
 // Estimated: `3610`
- // Minimum execution time: 91_802_000 picoseconds.
- Weight::from_parts(93_672_000, 0)
+ // Minimum execution time: 94_422_000 picoseconds.
+ Weight::from_parts(96_997_000, 0)
 .saturating_add(Weight::from_parts(0, 3610))
 .saturating_add(T::DbWeight::get().reads(8))
 .saturating_add(T::DbWeight::get().writes(3))
@@ -118,8 +140,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `367`
 // Estimated: `6196`
- // Minimum execution time: 118_930_000 picoseconds.
- Weight::from_parts(122_306_000, 0)
+ // Minimum execution time: 123_368_000 picoseconds.
+ Weight::from_parts(125_798_000, 0)
 .saturating_add(Weight::from_parts(0, 6196))
 .saturating_add(T::DbWeight::get().reads(9))
 .saturating_add(T::DbWeight::get().writes(5))
@@ -148,8 +170,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `496`
 // Estimated: `6208`
- // Minimum execution time: 140_527_000 picoseconds.
- Weight::from_parts(144_501_000, 0)
+ // Minimum execution time: 142_033_000 picoseconds.
+ Weight::from_parts(145_702_000, 0)
 .saturating_add(Weight::from_parts(0, 6208))
 .saturating_add(T::DbWeight::get().reads(12))
 .saturating_add(T::DbWeight::get().writes(7))
@@ -158,8 +180,16 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 7_556_000 picoseconds.
- Weight::from_parts(7_798_000, 0)
+ // Minimum execution time: 7_558_000 picoseconds.
+ Weight::from_parts(7_916_000, 0)
+ .saturating_add(Weight::from_parts(0, 0))
+ }
+ fn execute_blob() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `0`
+ // Estimated: `0`
+ // Minimum execution time: 7_978_000 picoseconds.
+ Weight::from_parts(8_210_000, 0)
 .saturating_add(Weight::from_parts(0, 0))
 }
 /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1)
 /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
 fn force_xcm_version() -> Weight {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 6_373_000 picoseconds.
- Weight::from_parts(6_603_000, 0)
+ // Minimum execution time: 6_439_000 picoseconds.
+ Weight::from_parts(6_711_000, 0)
 .saturating_add(Weight::from_parts(0, 0))
 .saturating_add(T::DbWeight::get().writes(1))
 }
@@ -179,8 +209,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 1_941_000 picoseconds.
- Weight::from_parts(2_088_000, 0)
+ // Minimum execution time: 1_982_000 picoseconds.
+ Weight::from_parts(2_260_000, 0)
 .saturating_add(Weight::from_parts(0, 0))
 .saturating_add(T::DbWeight::get().writes(1))
 }
@@ -206,8 +236,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `145`
 // Estimated: `3610`
- // Minimum execution time: 27_080_000 picoseconds.
- Weight::from_parts(27_820_000, 0)
+ // Minimum execution time: 27_120_000 picoseconds.
+ Weight::from_parts(28_048_000, 0)
 .saturating_add(Weight::from_parts(0, 3610))
 .saturating_add(T::DbWeight::get().reads(8))
 .saturating_add(T::DbWeight::get().writes(5))
@@ -232,8 +262,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `363`
 // Estimated: `3828`
- // Minimum execution time: 28_850_000 picoseconds.
- Weight::from_parts(29_506_000, 0)
+ // Minimum execution time: 29_354_000 picoseconds.
+ Weight::from_parts(30_205_000, 0)
 .saturating_add(Weight::from_parts(0, 3828))
 .saturating_add(T::DbWeight::get().reads(7))
 .saturating_add(T::DbWeight::get().writes(4))
@@ -244,8 +274,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 2_033_000 picoseconds.
- Weight::from_parts(2_201_000, 0)
+ // Minimum execution time: 1_926_000 picoseconds.
+ Weight::from_parts(2_013_000, 0)
 .saturating_add(Weight::from_parts(0, 0))
 .saturating_add(T::DbWeight::get().writes(1))
 }
@@ -255,8 +285,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `159`
 // Estimated: `13524`
- // Minimum execution time: 18_844_000 picoseconds.
- Weight::from_parts(19_197_000, 0)
+ // Minimum execution time: 18_611_000 picoseconds.
+ Weight::from_parts(19_120_000, 0)
 .saturating_add(Weight::from_parts(0, 13524))
 .saturating_add(T::DbWeight::get().reads(5))
 .saturating_add(T::DbWeight::get().writes(2))
@@ -267,8 +297,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `163`
 // Estimated: `13528`
- // Minimum execution time: 18_940_000 picoseconds.
- Weight::from_parts(19_450_000, 0)
+ // Minimum execution time: 18_373_000 picoseconds.
+ Weight::from_parts(18_945_000, 0)
 .saturating_add(Weight::from_parts(0, 13528))
 .saturating_add(T::DbWeight::get().reads(5))
 .saturating_add(T::DbWeight::get().writes(2))
@@ -279,8 +309,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `173`
 // Estimated: `16013`
- // Minimum execution time: 20_521_000 picoseconds.
- Weight::from_parts(21_076_000, 0)
+ // Minimum execution time: 20_459_000 picoseconds.
+ Weight::from_parts(20_951_000, 0)
 .saturating_add(Weight::from_parts(0, 16013))
 .saturating_add(T::DbWeight::get().reads(6))
 }
@@ -302,8 +332,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `212`
 // Estimated: `6152`
- // Minimum execution time: 26_007_000 picoseconds.
- Weight::from_parts(26_448_000, 0)
+ // Minimum execution time: 26_003_000 picoseconds.
+ Weight::from_parts(26_678_000, 0)
 .saturating_add(Weight::from_parts(0, 6152))
 .saturating_add(T::DbWeight::get().reads(8))
 .saturating_add(T::DbWeight::get().writes(3))
@@ -314,8 +344,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `206`
 // Estimated: `11096`
- // Minimum execution time: 11_584_000 picoseconds.
- Weight::from_parts(12_080_000, 0)
+ // Minimum execution time: 11_557_000 picoseconds.
+ Weight::from_parts(11_868_000, 0)
 .saturating_add(Weight::from_parts(0, 11096))
 .saturating_add(T::DbWeight::get().reads(4))
 }
@@ -325,8 +355,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `170`
 // Estimated: `13535`
- // Minimum execution time: 19_157_000 picoseconds.
- Weight::from_parts(19_513_000, 0)
+ // Minimum execution time: 18_710_000 picoseconds.
+ Weight::from_parts(19_240_000, 0)
 .saturating_add(Weight::from_parts(0, 13535))
 .saturating_add(T::DbWeight::get().reads(5))
 .saturating_add(T::DbWeight::get().writes(2))
@@ -349,8 +379,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `212`
 // Estimated: `13577`
- // Minimum execution time: 34_878_000 picoseconds.
- Weight::from_parts(35_623_000, 0)
+ // Minimum execution time: 34_393_000 picoseconds.
+ Weight::from_parts(35_138_000, 0)
 .saturating_add(Weight::from_parts(0, 13577))
 .saturating_add(T::DbWeight::get().reads(11))
 .saturating_add(T::DbWeight::get().writes(4))
@@ -363,8 +393,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `103`
 // Estimated: `1588`
- // Minimum execution time: 3_900_000 picoseconds.
- Weight::from_parts(4_161_000, 0)
+ // Minimum execution time: 4_043_000 picoseconds.
+ Weight::from_parts(4_216_000, 0)
 .saturating_add(Weight::from_parts(0, 1588))
 .saturating_add(T::DbWeight::get().reads(1))
 .saturating_add(T::DbWeight::get().writes(2))
@@ -375,8 +405,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `7740`
 // Estimated: `11205`
- // Minimum execution time: 25_731_000 picoseconds.
- Weight::from_parts(26_160_000, 0)
+ // Minimum execution time: 25_410_000 picoseconds.
+ Weight::from_parts(26_019_000, 0)
 .saturating_add(Weight::from_parts(0, 11205))
 .saturating_add(T::DbWeight::get().reads(1))
 .saturating_add(T::DbWeight::get().writes(1))
@@ -387,8 +417,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `160`
 // Estimated: `3625`
- // Minimum execution time: 37_251_000 picoseconds.
- Weight::from_parts(38_075_000, 0)
+ // Minimum execution time: 38_850_000 picoseconds.
+ Weight::from_parts(39_593_000, 0)
 .saturating_add(Weight::from_parts(0, 3625))
 .saturating_add(T::DbWeight::get().reads(1))
 .saturating_add(T::DbWeight::get().writes(1))
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs
index a732e1a5734..adfaa9ea202 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs
@@ -17,9 +17,9 @@
 //! Autogenerated weights for `pallet_xcm`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024
 // Executed Command:
@@ -64,8 +64,30 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `38`
 // Estimated: `3503`
- // Minimum execution time: 18_513_000 picoseconds.
- Weight::from_parts(19_156_000, 0)
+ // Minimum execution time: 18_732_000 picoseconds.
+ Weight::from_parts(19_386_000, 0)
+ .saturating_add(Weight::from_parts(0, 3503))
+ .saturating_add(T::DbWeight::get().reads(6))
+ .saturating_add(T::DbWeight::get().writes(2))
+ }
+ /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0)
+ /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+ /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+ /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+ /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+ /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+ /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ fn send_blob() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `38`
+ // Estimated: `3503`
+ // Minimum execution time: 18_943_000 picoseconds.
+ Weight::from_parts(19_455_000, 0)
 .saturating_add(Weight::from_parts(0, 3503))
 .saturating_add(T::DbWeight::get().reads(6))
 .saturating_add(T::DbWeight::get().writes(2))
@@ -90,8 +112,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `70`
 // Estimated: `3593`
- // Minimum execution time: 88_096_000 picoseconds.
- Weight::from_parts(89_732_000, 0)
+ // Minimum execution time: 88_917_000 picoseconds.
+ Weight::from_parts(91_611_000, 0)
 .saturating_add(Weight::from_parts(0, 3593))
 .saturating_add(T::DbWeight::get().reads(8))
 .saturating_add(T::DbWeight::get().writes(3))
@@ -126,8 +148,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `70`
 // Estimated: `3593`
- // Minimum execution time: 88_239_000 picoseconds.
- Weight::from_parts(89_729_000, 0)
+ // Minimum execution time: 88_587_000 picoseconds.
+ Weight::from_parts(90_303_000, 0)
 .saturating_add(Weight::from_parts(0, 3593))
 .saturating_add(T::DbWeight::get().reads(8))
 .saturating_add(T::DbWeight::get().writes(3))
@@ -142,14 +164,24 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 Weight::from_parts(18_446_744_073_709_551_000, 0)
 .saturating_add(Weight::from_parts(0, 0))
 }
+ /// Storage: `Benchmark::Override` (r:0 w:0)
+ /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ fn execute_blob() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `0`
+ // Estimated: `0`
+ // Minimum execution time: 18_446_744_073_709_551_000 picoseconds.
+ Weight::from_parts(18_446_744_073_709_551_000, 0)
+ .saturating_add(Weight::from_parts(0, 0))
+ }
 /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1)
 /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
 fn force_xcm_version() -> Weight {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 5_955_000 picoseconds.
- Weight::from_parts(6_266_000, 0)
+ // Minimum execution time: 5_856_000 picoseconds.
+ Weight::from_parts(6_202_000, 0)
 .saturating_add(Weight::from_parts(0, 0))
 .saturating_add(T::DbWeight::get().writes(1))
 }
@@ -159,8 +191,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 1_868_000 picoseconds.
- Weight::from_parts(1_961_000, 0)
+ // Minimum execution time: 1_797_000 picoseconds.
+ Weight::from_parts(1_970_000, 0)
 .saturating_add(Weight::from_parts(0, 0))
 .saturating_add(T::DbWeight::get().writes(1))
 }
@@ -186,8 +218,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `38`
 // Estimated: `3503`
- // Minimum execution time: 24_388_000 picoseconds.
- Weight::from_parts(25_072_000, 0)
+ // Minimum execution time: 24_479_000 picoseconds.
+ Weight::from_parts(25_058_000, 0)
 .saturating_add(Weight::from_parts(0, 3503))
 .saturating_add(T::DbWeight::get().reads(8))
 .saturating_add(T::DbWeight::get().writes(5))
@@ -212,8 +244,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `255`
 // Estimated: `3720`
- // Minimum execution time: 26_762_000 picoseconds.
- Weight::from_parts(27_631_000, 0)
+ // Minimum execution time: 27_282_000 picoseconds.
+ Weight::from_parts(27_924_000, 0)
 .saturating_add(Weight::from_parts(0, 3720))
 .saturating_add(T::DbWeight::get().reads(7))
 .saturating_add(T::DbWeight::get().writes(4))
@@ -224,8 +256,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 1_856_000 picoseconds.
- Weight::from_parts(2_033_000, 0)
+ // Minimum execution time: 1_801_000 picoseconds.
+ Weight::from_parts(1_988_000, 0)
 .saturating_add(Weight::from_parts(0, 0))
 .saturating_add(T::DbWeight::get().writes(1))
 }
@@ -235,8 +267,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `89`
 // Estimated: `13454`
- // Minimum execution time: 17_718_000 picoseconds.
- Weight::from_parts(18_208_000, 0)
+ // Minimum execution time: 16_509_000 picoseconds.
+ Weight::from_parts(16_939_000, 0)
 .saturating_add(Weight::from_parts(0, 13454))
 .saturating_add(T::DbWeight::get().reads(5))
 .saturating_add(T::DbWeight::get().writes(2))
@@ -247,8 +279,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `93`
 // Estimated: `13458`
- // Minimum execution time: 17_597_000 picoseconds.
- Weight::from_parts(18_090_000, 0)
+ // Minimum execution time: 16_140_000 picoseconds.
+ Weight::from_parts(16_843_000, 0)
 .saturating_add(Weight::from_parts(0, 13458))
 .saturating_add(T::DbWeight::get().reads(5))
 .saturating_add(T::DbWeight::get().writes(2))
@@ -259,8 +291,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `106`
 // Estimated: `15946`
- // Minimum execution time: 19_533_000 picoseconds.
- Weight::from_parts(20_164_000, 0)
+ // Minimum execution time: 18_160_000 picoseconds.
+ Weight::from_parts(18_948_000, 0)
 .saturating_add(Weight::from_parts(0, 15946))
 .saturating_add(T::DbWeight::get().reads(6))
 }
@@ -282,8 +314,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `106`
 // Estimated: `6046`
- // Minimum execution time: 24_958_000 picoseconds.
- Weight::from_parts(25_628_000, 0)
+ // Minimum execution time: 24_409_000 picoseconds.
+ Weight::from_parts(25_261_000, 0)
 .saturating_add(Weight::from_parts(0, 6046))
 .saturating_add(T::DbWeight::get().reads(8))
 .saturating_add(T::DbWeight::get().writes(3))
@@ -294,8 +326,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `136`
 // Estimated: `11026`
- // Minimum execution time: 12_209_000 picoseconds.
- Weight::from_parts(12_612_000, 0)
+ // Minimum execution time: 10_848_000 picoseconds.
+ Weight::from_parts(11_241_000, 0)
 .saturating_add(Weight::from_parts(0, 11026))
 .saturating_add(T::DbWeight::get().reads(4))
 }
@@ -305,8 +337,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `100`
 // Estimated: `13465`
- // Minimum execution time: 17_844_000 picoseconds.
- Weight::from_parts(18_266_000, 0)
+ // Minimum execution time: 16_609_000 picoseconds.
+ Weight::from_parts(17_044_000, 0)
 .saturating_add(Weight::from_parts(0, 13465))
 .saturating_add(T::DbWeight::get().reads(5))
 .saturating_add(T::DbWeight::get().writes(2))
@@ -329,8 +361,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `106`
 // Estimated: `13471`
- // Minimum execution time: 34_131_000 picoseconds.
- Weight::from_parts(34_766_000, 0)
+ // Minimum execution time: 32_500_000 picoseconds.
+ Weight::from_parts(33_475_000, 0)
 .saturating_add(Weight::from_parts(0, 13471))
 .saturating_add(T::DbWeight::get().reads(11))
 .saturating_add(T::DbWeight::get().writes(4))
@@ -343,8 +375,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `32`
 // Estimated: `1517`
- // Minimum execution time: 3_525_000 picoseconds.
- Weight::from_parts(3_724_000, 0)
+ // Minimum execution time: 3_484_000 picoseconds.
+ Weight::from_parts(3_673_000, 0)
 .saturating_add(Weight::from_parts(0, 1517))
 .saturating_add(T::DbWeight::get().reads(1))
 .saturating_add(T::DbWeight::get().writes(2))
@@ -355,8 +387,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `7669`
 // Estimated: `11134`
- // Minimum execution time: 24_975_000 picoseconds.
- Weight::from_parts(25_517_000, 0)
+ // Minimum execution time: 25_225_000 picoseconds.
+ Weight::from_parts(25_731_000, 0)
 .saturating_add(Weight::from_parts(0, 11134))
 .saturating_add(T::DbWeight::get().reads(1))
 .saturating_add(T::DbWeight::get().writes(1))
@@ -367,8 +399,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `90`
 // Estimated: `3555`
- // Minimum execution time: 33_761_000 picoseconds.
- Weight::from_parts(34_674_000, 0)
+ // Minimum execution time: 33_961_000 picoseconds.
+ Weight::from_parts(34_818_000, 0)
 .saturating_add(Weight::from_parts(0, 3555))
 .saturating_add(T::DbWeight::get().reads(1))
 .saturating_add(T::DbWeight::get().writes(1))
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs
index a78ff2355ef..9cf4c61466a 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs
@@ -17,9 +17,9 @@
 //! Autogenerated weights for `pallet_xcm`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-03-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024
 // Executed Command:
@@ -64,8 +64,30 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `38`
 // Estimated: `3503`
- // Minimum execution time: 19_527_000 picoseconds.
- Weight::from_parts(19_839_000, 0)
+ // Minimum execution time: 19_702_000 picoseconds.
+ Weight::from_parts(20_410_000, 0)
+ .saturating_add(Weight::from_parts(0, 3503))
+ .saturating_add(T::DbWeight::get().reads(6))
+ .saturating_add(T::DbWeight::get().writes(2))
+ }
+ /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0)
+ /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+ /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+ /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+ /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+ /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+ /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ fn send_blob() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `38`
+ // Estimated: `3503`
+ // Minimum execution time: 19_525_000 picoseconds.
+ Weight::from_parts(20_071_000, 0)
 .saturating_add(Weight::from_parts(0, 3503))
 .saturating_add(T::DbWeight::get().reads(6))
 .saturating_add(T::DbWeight::get().writes(2))
@@ -90,8 +112,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `107`
 // Estimated: `3593`
- // Minimum execution time: 90_938_000 picoseconds.
- Weight::from_parts(92_822_000, 0)
+ // Minimum execution time: 91_793_000 picoseconds.
+ Weight::from_parts(93_761_000, 0)
 .saturating_add(Weight::from_parts(0, 3593))
 .saturating_add(T::DbWeight::get().reads(8))
 .saturating_add(T::DbWeight::get().writes(3))
@@ -126,8 +148,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `107`
 // Estimated: `3593`
- // Minimum execution time: 90_133_000 picoseconds.
- Weight::from_parts(92_308_000, 0)
+ // Minimum execution time: 91_819_000 picoseconds.
+ Weight::from_parts(93_198_000, 0)
 .saturating_add(Weight::from_parts(0, 3593))
 .saturating_add(T::DbWeight::get().reads(8))
 .saturating_add(T::DbWeight::get().writes(3))
@@ -142,14 +164,24 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 Weight::from_parts(18_446_744_073_709_551_000, 0)
 .saturating_add(Weight::from_parts(0, 0))
 }
+ /// Storage: `Benchmark::Override` (r:0 w:0)
+ /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ fn execute_blob() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `0`
+ // Estimated: `0`
+ // Minimum execution time: 18_446_744_073_709_551_000 picoseconds.
+ Weight::from_parts(18_446_744_073_709_551_000, 0)
+ .saturating_add(Weight::from_parts(0, 0))
+ }
 /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1)
 /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
 fn force_xcm_version() -> Weight {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 6_205_000 picoseconds.
- Weight::from_parts(6_595_000, 0)
+ // Minimum execution time: 6_183_000 picoseconds.
+ Weight::from_parts(6_598_000, 0)
 .saturating_add(Weight::from_parts(0, 0))
 .saturating_add(T::DbWeight::get().writes(1))
 }
@@ -159,8 +191,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 1_927_000 picoseconds.
- Weight::from_parts(2_062_000, 0)
+ // Minimum execution time: 1_987_000 picoseconds.
+ Weight::from_parts(2_076_000, 0)
 .saturating_add(Weight::from_parts(0, 0))
 .saturating_add(T::DbWeight::get().writes(1))
 }
@@ -186,8 +218,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `38`
 // Estimated: `3503`
- // Minimum execution time: 25_078_000 picoseconds.
- Weight::from_parts(25_782_000, 0)
+ // Minimum execution time: 25_375_000 picoseconds.
+ Weight::from_parts(26_165_000, 0)
 .saturating_add(Weight::from_parts(0, 3503))
 .saturating_add(T::DbWeight::get().reads(8))
 .saturating_add(T::DbWeight::get().writes(5))
@@ -212,8 +244,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `255`
 // Estimated: `3720`
- // Minimum execution time: 28_188_000 picoseconds.
- Weight::from_parts(28_826_000, 0)
+ // Minimum execution time: 28_167_000 picoseconds.
+ Weight::from_parts(28_792_000, 0)
 .saturating_add(Weight::from_parts(0, 3720))
 .saturating_add(T::DbWeight::get().reads(7))
 .saturating_add(T::DbWeight::get().writes(4))
@@ -224,8 +256,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 1_886_000 picoseconds.
- Weight::from_parts(1_991_000, 0)
+ // Minimum execution time: 2_039_000 picoseconds.
+ Weight::from_parts(2_211_000, 0)
 .saturating_add(Weight::from_parts(0, 0))
 .saturating_add(T::DbWeight::get().writes(1))
 }
@@ -235,8 +267,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `89`
 // Estimated: `13454`
- // Minimum execution time: 17_443_000 picoseconds.
- Weight::from_parts(17_964_000, 0)
+ // Minimum execution time: 17_127_000 picoseconds.
+ Weight::from_parts(17_519_000, 0)
 .saturating_add(Weight::from_parts(0, 13454))
 .saturating_add(T::DbWeight::get().reads(5))
 .saturating_add(T::DbWeight::get().writes(2))
@@ -247,8 +279,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `93`
 // Estimated: `13458`
- // Minimum execution time: 17_357_000 picoseconds.
- Weight::from_parts(18_006_000, 0)
+ // Minimum execution time: 16_701_000 picoseconds.
+ Weight::from_parts(17_250_000, 0)
 .saturating_add(Weight::from_parts(0, 13458))
 .saturating_add(T::DbWeight::get().reads(5))
 .saturating_add(T::DbWeight::get().writes(2))
@@ -259,8 +291,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `106`
 // Estimated: `15946`
- // Minimum execution time: 18_838_000 picoseconds.
- Weight::from_parts(19_688_000, 0)
+ // Minimum execution time: 18_795_000 picoseconds.
+ Weight::from_parts(19_302_000, 0)
 .saturating_add(Weight::from_parts(0, 15946))
 .saturating_add(T::DbWeight::get().reads(6))
 }
@@ -282,8 +314,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `106`
 // Estimated: `6046`
- // Minimum execution time: 25_517_000 picoseconds.
- Weight::from_parts(26_131_000, 0)
+ // Minimum execution time: 25_007_000 picoseconds.
+ Weight::from_parts(25_786_000, 0)
 .saturating_add(Weight::from_parts(0, 6046))
 .saturating_add(T::DbWeight::get().reads(8))
 .saturating_add(T::DbWeight::get().writes(3))
@@ -294,8 +326,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `136`
 // Estimated: `11026`
- // Minimum execution time: 11_587_000 picoseconds.
- Weight::from_parts(11_963_000, 0)
+ // Minimum execution time: 11_534_000 picoseconds.
+ Weight::from_parts(11_798_000, 0)
 .saturating_add(Weight::from_parts(0, 11026))
 .saturating_add(T::DbWeight::get().reads(4))
 }
@@ -305,8 +337,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `100`
 // Estimated: `13465`
- // Minimum execution time: 17_490_000 picoseconds.
- Weight::from_parts(18_160_000, 0)
+ // Minimum execution time: 17_357_000 picoseconds.
+ Weight::from_parts(17_629_000, 0)
 .saturating_add(Weight::from_parts(0, 13465))
 .saturating_add(T::DbWeight::get().reads(5))
 .saturating_add(T::DbWeight::get().writes(2))
@@ -329,8 +361,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `106`
 // Estimated: `13471`
- // Minimum execution time: 34_088_000 picoseconds.
- Weight::from_parts(34_598_000, 0)
+ // Minimum execution time: 33_487_000 picoseconds.
+ Weight::from_parts(34_033_000, 0)
 .saturating_add(Weight::from_parts(0, 13471))
 .saturating_add(T::DbWeight::get().reads(11))
 .saturating_add(T::DbWeight::get().writes(4))
@@ -343,8 +375,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `32`
 // Estimated: `1517`
- // Minimum execution time: 3_566_000 picoseconds.
- Weight::from_parts(3_754_000, 0)
+ // Minimum execution time: 3_688_000 picoseconds.
+ Weight::from_parts(3_854_000, 0)
 .saturating_add(Weight::from_parts(0, 1517))
 .saturating_add(T::DbWeight::get().reads(1))
 .saturating_add(T::DbWeight::get().writes(2))
@@ -355,8 +387,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `7669`
 // Estimated: `11134`
- // Minimum execution time: 25_078_000 picoseconds.
- Weight::from_parts(25_477_000, 0)
+ // Minimum execution time: 26_336_000 picoseconds.
+ Weight::from_parts(26_873_000, 0)
 .saturating_add(Weight::from_parts(0, 11134))
 .saturating_add(T::DbWeight::get().reads(1))
 .saturating_add(T::DbWeight::get().writes(1))
@@ -367,8 +399,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `90`
 // Estimated: `3555`
- // Minimum execution time: 34_661_000 picoseconds.
- Weight::from_parts(35_411_000, 0)
+ // Minimum execution time: 34_633_000 picoseconds.
+ Weight::from_parts(35_171_000, 0)
 .saturating_add(Weight::from_parts(0, 3555))
 .saturating_add(T::DbWeight::get().reads(1))
 .saturating_add(T::DbWeight::get().writes(1))
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs
index 5d427d85004..0edd5dfff2b 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs
@@ -17,9 +17,9 @@
 //! Autogenerated weights for `pallet_xcm`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-westend-dev")`, DB CACHE: 1024
 // Executed Command:
@@ -64,8 +64,30 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `145`
 // Estimated: `3610`
- // Minimum execution time: 21_813_000 picoseconds.
- Weight::from_parts(22_332_000, 0)
+ // Minimum execution time: 21_911_000 picoseconds.
+ Weight::from_parts(22_431_000, 0)
+ .saturating_add(Weight::from_parts(0, 3610))
+ .saturating_add(T::DbWeight::get().reads(6))
+ .saturating_add(T::DbWeight::get().writes(2))
+ }
+ /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0)
+ /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+ /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+ /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+ /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+ /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+ /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ fn send_blob() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `145`
+ // Estimated: `3610`
+ // Minimum execution time: 22_143_000 picoseconds.
+ Weight::from_parts(22_843_000, 0)
 .saturating_add(Weight::from_parts(0, 3610))
 .saturating_add(T::DbWeight::get().reads(6))
 .saturating_add(T::DbWeight::get().writes(2))
@@ -90,8 +112,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `214`
 // Estimated: `3679`
- // Minimum execution time: 93_243_000 picoseconds.
- Weight::from_parts(95_650_000, 0)
+ // Minimum execution time: 96_273_000 picoseconds.
+ Weight::from_parts(98_351_000, 0)
 .saturating_add(Weight::from_parts(0, 3679))
 .saturating_add(T::DbWeight::get().reads(8))
 .saturating_add(T::DbWeight::get().writes(3))
@@ -126,8 +148,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `214`
 // Estimated: `3679`
- // Minimum execution time: 96_199_000 picoseconds.
- Weight::from_parts(98_620_000, 0)
+ // Minimum execution time: 95_571_000 picoseconds.
+ Weight::from_parts(96_251_000, 0)
 .saturating_add(Weight::from_parts(0, 3679))
 .saturating_add(T::DbWeight::get().reads(8))
 .saturating_add(T::DbWeight::get().writes(3))
@@ -142,14 +164,24 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 Weight::from_parts(18_446_744_073_709_551_000, 0)
 .saturating_add(Weight::from_parts(0, 0))
 }
+ /// Storage: `Benchmark::Override` (r:0 w:0)
+ /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ fn execute_blob() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `0`
+ // Estimated: `0`
+ // Minimum execution time: 18_446_744_073_709_551_000 picoseconds.
+ Weight::from_parts(18_446_744_073_709_551_000, 0)
+ .saturating_add(Weight::from_parts(0, 0))
+ }
 /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1)
 /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
 fn force_xcm_version() -> Weight {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 6_442_000 picoseconds.
- Weight::from_parts(6_682_000, 0)
+ // Minimum execution time: 6_227_000 picoseconds.
+ Weight::from_parts(6_419_000, 0)
 .saturating_add(Weight::from_parts(0, 0))
 .saturating_add(T::DbWeight::get().writes(1))
 }
@@ -159,8 +191,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 1_833_000 picoseconds.
- Weight::from_parts(1_973_000, 0)
+ // Minimum execution time: 1_851_000 picoseconds.
+ Weight::from_parts(1_940_000, 0)
 .saturating_add(Weight::from_parts(0, 0))
 .saturating_add(T::DbWeight::get().writes(1))
 }
@@ -186,8 +218,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `145`
 // Estimated: `3610`
- // Minimum execution time: 27_318_000 picoseconds.
- Weight::from_parts(28_224_000, 0)
+ // Minimum execution time: 27_449_000 picoseconds.
+ Weight::from_parts(28_513_000, 0)
 .saturating_add(Weight::from_parts(0, 3610))
 .saturating_add(T::DbWeight::get().reads(8))
 .saturating_add(T::DbWeight::get().writes(5))
@@ -212,8 +244,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `363`
 // Estimated: `3828`
- // Minimum execution time: 29_070_000 picoseconds.
- Weight::from_parts(30_205_000, 0)
+ // Minimum execution time: 29_477_000 picoseconds.
+ Weight::from_parts(30_251_000, 0)
 .saturating_add(Weight::from_parts(0, 3828))
 .saturating_add(T::DbWeight::get().reads(7))
 .saturating_add(T::DbWeight::get().writes(4))
@@ -224,8 +256,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 1_904_000 picoseconds.
- Weight::from_parts(2_033_000, 0)
+ // Minimum execution time: 1_894_000 picoseconds.
+ Weight::from_parts(2_009_000, 0)
 .saturating_add(Weight::from_parts(0, 0))
 .saturating_add(T::DbWeight::get().writes(1))
 }
@@ -235,8 +267,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `159`
 // Estimated: `13524`
- // Minimum execution time: 18_348_000 picoseconds.
- Weight::from_parts(18_853_000, 0)
+ // Minimum execution time: 17_991_000 picoseconds.
+ Weight::from_parts(18_651_000, 0)
 .saturating_add(Weight::from_parts(0, 13524))
 .saturating_add(T::DbWeight::get().reads(5))
 .saturating_add(T::DbWeight::get().writes(2))
@@ -247,8 +279,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `163`
 // Estimated: `13528`
- // Minimum execution time: 17_964_000 picoseconds.
- Weight::from_parts(18_548_000, 0)
+ // Minimum execution time: 18_321_000 picoseconds.
+ Weight::from_parts(18_701_000, 0)
 .saturating_add(Weight::from_parts(0, 13528))
 .saturating_add(T::DbWeight::get().reads(5))
 .saturating_add(T::DbWeight::get().writes(2))
@@ -259,8 +291,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `173`
 // Estimated: `16013`
- // Minimum execution time: 19_708_000 picoseconds.
- Weight::from_parts(20_157_000, 0)
+ // Minimum execution time: 19_762_000 picoseconds.
+ Weight::from_parts(20_529_000, 0)
 .saturating_add(Weight::from_parts(0, 16013))
 .saturating_add(T::DbWeight::get().reads(6))
 }
@@ -282,8 +314,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `212`
 // Estimated: `6152`
- // Minimum execution time: 26_632_000 picoseconds.
- Weight::from_parts(27_314_000, 0)
+ // Minimum execution time: 26_927_000 picoseconds.
+ Weight::from_parts(27_629_000, 0)
 .saturating_add(Weight::from_parts(0, 6152))
 .saturating_add(T::DbWeight::get().reads(8))
 .saturating_add(T::DbWeight::get().writes(3))
@@ -294,8 +326,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `206`
 // Estimated: `11096`
- // Minimum execution time: 11_929_000 picoseconds.
- Weight::from_parts(12_304_000, 0)
+ // Minimum execution time: 11_957_000 picoseconds.
+ Weight::from_parts(12_119_000, 0)
 .saturating_add(Weight::from_parts(0, 11096))
 .saturating_add(T::DbWeight::get().reads(4))
 }
@@ -305,8 +337,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `170`
 // Estimated: `13535`
- // Minimum execution time: 18_599_000 picoseconds.
- Weight::from_parts(19_195_000, 0)
+ // Minimum execution time: 17_942_000 picoseconds.
+ Weight::from_parts(18_878_000, 0)
 .saturating_add(Weight::from_parts(0, 13535))
 .saturating_add(T::DbWeight::get().reads(5))
 .saturating_add(T::DbWeight::get().writes(2))
@@ -329,8 +361,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `212`
 // Estimated: `13577`
- // Minimum execution time: 35_524_000 picoseconds.
- Weight::from_parts(36_272_000, 0)
+ // Minimum execution time: 35_640_000 picoseconds.
+ Weight::from_parts(36_340_000, 0)
 .saturating_add(Weight::from_parts(0, 13577))
 .saturating_add(T::DbWeight::get().reads(11))
 .saturating_add(T::DbWeight::get().writes(4))
@@ -344,7 +376,7 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Measured: `103`
 // Estimated: `1588`
 // Minimum execution time: 4_044_000 picoseconds.
- Weight::from_parts(4_238_000, 0)
+ Weight::from_parts(4_229_000, 0)
 .saturating_add(Weight::from_parts(0, 1588))
 .saturating_add(T::DbWeight::get().reads(1))
 .saturating_add(T::DbWeight::get().writes(2))
@@ -355,8 +387,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `7740`
 // Estimated: `11205`
- // Minimum execution time: 25_741_000 picoseconds.
- Weight::from_parts(26_301_000, 0)
+ // Minimum execution time: 26_262_000 picoseconds.
+ Weight::from_parts(26_842_000, 0)
 .saturating_add(Weight::from_parts(0, 11205))
 .saturating_add(T::DbWeight::get().reads(1))
 .saturating_add(T::DbWeight::get().writes(1))
@@ -367,8 +399,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `160`
 // Estimated: `3625`
- // Minimum execution time: 35_925_000 picoseconds.
- Weight::from_parts(36_978_000, 0)
+ // Minimum execution time: 36_775_000 picoseconds.
+ Weight::from_parts(37_265_000, 0)
 .saturating_add(Weight::from_parts(0, 3625))
 .saturating_add(T::DbWeight::get().reads(1))
 .saturating_add(T::DbWeight::get().writes(1))
diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs
index c5d315467c1..df0044089c8 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs
@@ -17,9 +17,9 @@
 //! Autogenerated weights for `pallet_xcm`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024
 // Executed Command:
@@ -62,8 +62,28 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `74`
 // Estimated: `3539`
- // Minimum execution time: 35_051_000 picoseconds.
- Weight::from_parts(35_200_000, 0)
+ // Minimum execution time: 18_767_000 picoseconds.
+ Weight::from_parts(19_420_000, 0)
+ .saturating_add(Weight::from_parts(0, 3539))
+ .saturating_add(T::DbWeight::get().reads(5))
+ .saturating_add(T::DbWeight::get().writes(2))
+ }
+ /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+ /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+ /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+ /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+ /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+ /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ fn send_blob() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `74`
+ // Estimated: `3539`
+ // Minimum execution time: 19_184_000 picoseconds.
+ Weight::from_parts(19_695_000, 0)
 .saturating_add(Weight::from_parts(0, 3539))
 .saturating_add(T::DbWeight::get().reads(5))
 .saturating_add(T::DbWeight::get().writes(2))
@@ -84,8 +104,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `106`
 // Estimated: `3571`
- // Minimum execution time: 56_235_000 picoseconds.
- Weight::from_parts(58_178_000, 0)
+ // Minimum execution time: 58_120_000 picoseconds.
+ Weight::from_parts(59_533_000, 0)
 .saturating_add(Weight::from_parts(0, 3571))
 .saturating_add(T::DbWeight::get().reads(6))
 .saturating_add(T::DbWeight::get().writes(2))
@@ -120,14 +140,24 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 Weight::from_parts(18_446_744_073_709_551_000, 0)
 .saturating_add(Weight::from_parts(0, 0))
 }
+ /// Storage: `Benchmark::Override` (r:0 w:0)
+ /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ fn execute_blob() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `0`
+ // Estimated: `0`
+ // Minimum execution time: 18_446_744_073_709_551_000 picoseconds.
+ Weight::from_parts(18_446_744_073_709_551_000, 0)
+ .saturating_add(Weight::from_parts(0, 0))
+ }
 /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1)
 /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
 fn force_xcm_version() -> Weight {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 6_226_000 picoseconds.
- Weight::from_parts(6_403_000, 0)
+ // Minimum execution time: 6_074_000 picoseconds.
+ Weight::from_parts(6_398_000, 0)
 .saturating_add(Weight::from_parts(0, 0))
 .saturating_add(T::DbWeight::get().writes(1))
 }
@@ -137,8 +167,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 2_020_000 picoseconds.
- Weight::from_parts(2_100_000, 0)
+ // Minimum execution time: 2_036_000 picoseconds.
+ Weight::from_parts(2_180_000, 0)
 .saturating_add(Weight::from_parts(0, 0))
 .saturating_add(T::DbWeight::get().writes(1))
 }
@@ -162,8 +192,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `74`
 // Estimated: `3539`
- // Minimum execution time: 24_387_000 picoseconds.
- Weight::from_parts(24_814_000, 0)
+ // Minimum execution time: 25_014_000 picoseconds.
+ Weight::from_parts(25_374_000, 0)
 .saturating_add(Weight::from_parts(0, 3539))
 .saturating_add(T::DbWeight::get().reads(7))
 .saturating_add(T::DbWeight::get().writes(5))
@@ -186,8 +216,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `292`
 // Estimated: `3757`
- // Minimum execution time: 27_039_000 picoseconds.
- Weight::from_parts(27_693_000, 0)
+ // Minimum execution time: 27_616_000 picoseconds.
+ Weight::from_parts(28_499_000, 0)
 .saturating_add(Weight::from_parts(0, 3757))
 .saturating_add(T::DbWeight::get().reads(6))
 .saturating_add(T::DbWeight::get().writes(4))
@@ -198,8 +228,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 1_920_000 picoseconds.
- Weight::from_parts(2_082_000, 0)
+ // Minimum execution time: 2_061_000 picoseconds.
+ Weight::from_parts(2_153_000, 0)
 .saturating_add(Weight::from_parts(0, 0))
 .saturating_add(T::DbWeight::get().writes(1))
 }
@@ -209,8 +239,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `89`
 // Estimated: `13454`
- // Minimum execution time: 17_141_000 picoseconds.
- Weight::from_parts(17_500_000, 0)
+ // Minimum execution time: 16_592_000 picoseconds.
+ Weight::from_parts(16_900_000, 0)
 .saturating_add(Weight::from_parts(0, 13454))
 .saturating_add(T::DbWeight::get().reads(5))
 .saturating_add(T::DbWeight::get().writes(2))
@@ -221,8 +251,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `93`
 // Estimated: `13458`
- // Minimum execution time: 17_074_000 picoseconds.
- Weight::from_parts(17_431_000, 0)
+ // Minimum execution time: 16_694_000 picoseconds.
+ Weight::from_parts(16_905_000, 0)
 .saturating_add(Weight::from_parts(0, 13458))
 .saturating_add(T::DbWeight::get().reads(5))
 .saturating_add(T::DbWeight::get().writes(2))
@@ -233,8 +263,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `106`
 // Estimated: `15946`
- // Minimum execution time: 19_139_000 picoseconds.
- Weight::from_parts(19_474_000, 0)
+ // Minimum execution time: 17_779_000 picoseconds.
+ Weight::from_parts(18_490_000, 0)
 .saturating_add(Weight::from_parts(0, 15946))
 .saturating_add(T::DbWeight::get().reads(6))
 }
@@ -254,8 +284,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `142`
 // Estimated: `6082`
- // Minimum execution time: 24_346_000 picoseconds.
- Weight::from_parts(25_318_000, 0)
+ // Minimum execution time: 24_526_000 picoseconds.
+ Weight::from_parts(25_182_000, 0)
 .saturating_add(Weight::from_parts(0, 6082))
 .saturating_add(T::DbWeight::get().reads(7))
 .saturating_add(T::DbWeight::get().writes(3))
@@ -266,8 +296,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `136`
 // Estimated: `11026`
- // Minimum execution time: 11_777_000 picoseconds.
- Weight::from_parts(12_051_000, 0)
+ // Minimum execution time: 10_467_000 picoseconds.
+ Weight::from_parts(10_934_000, 0)
 .saturating_add(Weight::from_parts(0, 11026))
 .saturating_add(T::DbWeight::get().reads(4))
 }
@@ -277,8 +307,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `100`
 // Estimated: `13465`
- // Minimum execution time: 17_538_000 picoseconds.
- Weight::from_parts(17_832_000, 0)
+ // Minimum execution time: 16_377_000 picoseconds.
+ Weight::from_parts(17_114_000, 0)
 .saturating_add(Weight::from_parts(0, 13465))
 .saturating_add(T::DbWeight::get().reads(5))
 .saturating_add(T::DbWeight::get().writes(2))
@@ -299,8 +329,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `142`
 // Estimated: `13507`
- // Minimum execution time: 33_623_000 picoseconds.
- Weight::from_parts(34_186_000, 0)
+ // Minimum execution time: 32_575_000 picoseconds.
+ Weight::from_parts(33_483_000, 0)
 .saturating_add(Weight::from_parts(0, 13507))
 .saturating_add(T::DbWeight::get().reads(10))
 .saturating_add(T::DbWeight::get().writes(4))
@@ -313,8 +343,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `32`
 // Estimated: `1517`
- // Minimum execution time: 3_363_000 picoseconds.
- Weight::from_parts(3_511_000, 0)
+ // Minimum execution time: 3_604_000 picoseconds.
+ Weight::from_parts(3_744_000, 0)
 .saturating_add(Weight::from_parts(0, 1517))
 .saturating_add(T::DbWeight::get().reads(1))
 .saturating_add(T::DbWeight::get().writes(2))
@@ -325,8 +355,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `7669`
 // Estimated: `11134`
- // Minimum execution time: 23_969_000 picoseconds.
- Weight::from_parts(24_347_000, 0)
+ // Minimum execution time: 23_983_000 picoseconds.
+ Weight::from_parts(24_404_000, 0)
 .saturating_add(Weight::from_parts(0, 11134))
 .saturating_add(T::DbWeight::get().reads(1))
 .saturating_add(T::DbWeight::get().writes(1))
@@ -337,8 +367,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `90`
 // Estimated: `3555`
- // Minimum execution time: 34_071_000 picoseconds.
- Weight::from_parts(35_031_000, 0)
+ // Minimum execution time: 34_446_000 picoseconds.
+ Weight::from_parts(35_465_000, 0)
 .saturating_add(Weight::from_parts(0, 3555))
 .saturating_add(T::DbWeight::get().reads(1))
 .saturating_add(T::DbWeight::get().writes(1))
diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs
index 0082db3099d..a1701c5f1c2 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs
@@ -17,9 +17,9 @@
 //! Autogenerated weights for `pallet_xcm`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-westend-dev")`, DB CACHE: 1024
 // Executed Command:
@@ -62,8 +62,28 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `74`
 // Estimated: `3539`
- // Minimum execution time: 18_410_000 picoseconds.
- Weight::from_parts(18_657_000, 0)
+ // Minimum execution time: 17_681_000 picoseconds.
+ Weight::from_parts(18_350_000, 0)
+ .saturating_add(Weight::from_parts(0, 3539))
+ .saturating_add(T::DbWeight::get().reads(5))
+ .saturating_add(T::DbWeight::get().writes(2))
+ }
+ /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+ /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+ /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+ /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+ /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+ /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ fn send_blob() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `74`
+ // Estimated: `3539`
+ // Minimum execution time: 18_091_000 picoseconds.
+ Weight::from_parts(18_327_000, 0)
 .saturating_add(Weight::from_parts(0, 3539))
 .saturating_add(T::DbWeight::get().reads(5))
 .saturating_add(T::DbWeight::get().writes(2))
@@ -84,8 +104,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `106`
 // Estimated: `3571`
- // Minimum execution time: 56_616_000 picoseconds.
- Weight::from_parts(57_751_000, 0) + // Minimum execution time: 54_943_000 picoseconds. + Weight::from_parts(56_519_000, 0) .saturating_add(Weight::from_parts(0, 3571)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -120,14 +140,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn execute_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_014_000 picoseconds. - Weight::from_parts(6_412_000, 0) + // Minimum execution time: 5_887_000 picoseconds. + Weight::from_parts(6_101_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -137,8 +167,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_844_000 picoseconds. - Weight::from_parts(1_957_000, 0) + // Minimum execution time: 1_940_000 picoseconds. + Weight::from_parts(2_022_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -162,8 +192,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 24_067_000 picoseconds. - Weight::from_parts(24_553_000, 0) + // Minimum execution time: 23_165_000 picoseconds. + Weight::from_parts(23_800_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) @@ -186,8 +216,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `292` // Estimated: `3757` - // Minimum execution time: 27_023_000 picoseconds. - Weight::from_parts(27_620_000, 0) + // Minimum execution time: 26_506_000 picoseconds. + Weight::from_parts(27_180_000, 0) .saturating_add(Weight::from_parts(0, 3757)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -198,8 +228,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_866_000 picoseconds. - Weight::from_parts(1_984_000, 0) + // Minimum execution time: 1_868_000 picoseconds. + Weight::from_parts(2_002_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -209,8 +239,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89` // Estimated: `13454` - // Minimum execution time: 16_425_000 picoseconds. - Weight::from_parts(16_680_000, 0) + // Minimum execution time: 16_138_000 picoseconds. 
+ Weight::from_parts(16_447_000, 0) .saturating_add(Weight::from_parts(0, 13454)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -221,8 +251,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `93` // Estimated: `13458` - // Minimum execution time: 16_171_000 picoseconds. - Weight::from_parts(16_564_000, 0) + // Minimum execution time: 16_099_000 picoseconds. + Weight::from_parts(16_592_000, 0) .saturating_add(Weight::from_parts(0, 13458)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -233,8 +263,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `15946` - // Minimum execution time: 17_785_000 picoseconds. - Weight::from_parts(18_123_000, 0) + // Minimum execution time: 17_972_000 picoseconds. + Weight::from_parts(18_379_000, 0) .saturating_add(Weight::from_parts(0, 15946)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -254,8 +284,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `6082` - // Minimum execution time: 23_903_000 picoseconds. - Weight::from_parts(24_769_000, 0) + // Minimum execution time: 23_554_000 picoseconds. + Weight::from_parts(24_446_000, 0) .saturating_add(Weight::from_parts(0, 6082)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) @@ -266,8 +296,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `136` // Estimated: `11026` - // Minimum execution time: 10_617_000 picoseconds. - Weight::from_parts(10_843_000, 0) + // Minimum execution time: 10_541_000 picoseconds. + Weight::from_parts(10_894_000, 0) .saturating_add(Weight::from_parts(0, 11026)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -277,8 +307,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `100` // Estimated: `13465` - // Minimum execution time: 16_656_000 picoseconds. - Weight::from_parts(17_106_000, 0) + // Minimum execution time: 16_404_000 picoseconds. + Weight::from_parts(16_818_000, 0) .saturating_add(Weight::from_parts(0, 13465)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -299,8 +329,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `13507` - // Minimum execution time: 31_721_000 picoseconds. - Weight::from_parts(32_547_000, 0) + // Minimum execution time: 31_617_000 picoseconds. + Weight::from_parts(32_336_000, 0) .saturating_add(Weight::from_parts(0, 13507)) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) @@ -313,8 +343,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_439_000 picoseconds. - Weight::from_parts(3_619_000, 0) + // Minimum execution time: 3_328_000 picoseconds. + Weight::from_parts(3_501_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -325,8 +355,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 24_657_000 picoseconds. - Weight::from_parts(24_971_000, 0) + // Minimum execution time: 23_571_000 picoseconds. 
+ Weight::from_parts(24_312_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -337,8 +367,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 34_028_000 picoseconds. - Weight::from_parts(34_697_000, 0) + // Minimum execution time: 32_879_000 picoseconds. + Weight::from_parts(33_385_000, 0) .saturating_add(Weight::from_parts(0, 3555)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs index fabce29b5fd..ac494fdc719 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -62,8 +62,28 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 17_830_000 picoseconds. - Weight::from_parts(18_411_000, 0) + // Minimum execution time: 17_935_000 picoseconds. + Weight::from_parts(18_482_000, 0) + .saturating_add(Weight::from_parts(0, 3503)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn send_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `38` + // Estimated: `3503` + // Minimum execution time: 18_311_000 picoseconds. + Weight::from_parts(18_850_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -84,8 +104,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 55_456_000 picoseconds. - Weight::from_parts(56_808_000, 0) + // Minimum execution time: 56_182_000 picoseconds. 
+ Weight::from_parts(58_136_000, 0) .saturating_add(Weight::from_parts(0, 3535)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -120,14 +140,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn execute_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_996_000 picoseconds. - Weight::from_parts(6_154_000, 0) + // Minimum execution time: 5_979_000 picoseconds. + Weight::from_parts(6_289_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -137,8 +167,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_768_000 picoseconds. - Weight::from_parts(1_914_000, 0) + // Minimum execution time: 1_853_000 picoseconds. + Weight::from_parts(2_045_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -162,8 +192,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 24_120_000 picoseconds. - Weight::from_parts(24_745_000, 0) + // Minimum execution time: 23_827_000 picoseconds. + Weight::from_parts(24_493_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) @@ -186,8 +216,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `255` // Estimated: `3720` - // Minimum execution time: 26_630_000 picoseconds. - Weight::from_parts(27_289_000, 0) + // Minimum execution time: 26_755_000 picoseconds. + Weight::from_parts(27_125_000, 0) .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -198,8 +228,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_821_000 picoseconds. - Weight::from_parts(1_946_000, 0) + // Minimum execution time: 1_898_000 picoseconds. + Weight::from_parts(2_028_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -209,8 +239,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89` // Estimated: `13454` - // Minimum execution time: 16_586_000 picoseconds. - Weight::from_parts(16_977_000, 0) + // Minimum execution time: 16_300_000 picoseconds. 
+ Weight::from_parts(16_995_000, 0) .saturating_add(Weight::from_parts(0, 13454)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -221,8 +251,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `93` // Estimated: `13458` - // Minimum execution time: 16_923_000 picoseconds. - Weight::from_parts(17_415_000, 0) + // Minimum execution time: 16_495_000 picoseconds. + Weight::from_parts(16_950_000, 0) .saturating_add(Weight::from_parts(0, 13458)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -233,8 +263,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `15946` - // Minimum execution time: 18_596_000 picoseconds. - Weight::from_parts(18_823_000, 0) + // Minimum execution time: 18_153_000 picoseconds. + Weight::from_parts(18_595_000, 0) .saturating_add(Weight::from_parts(0, 15946)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -254,8 +284,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 23_817_000 picoseconds. - Weight::from_parts(24_520_000, 0) + // Minimum execution time: 23_387_000 picoseconds. + Weight::from_parts(24_677_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) @@ -266,8 +296,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `136` // Estimated: `11026` - // Minimum execution time: 11_042_000 picoseconds. - Weight::from_parts(11_578_000, 0) + // Minimum execution time: 10_939_000 picoseconds. + Weight::from_parts(11_210_000, 0) .saturating_add(Weight::from_parts(0, 11026)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -277,8 +307,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `100` // Estimated: `13465` - // Minimum execution time: 17_306_000 picoseconds. - Weight::from_parts(17_817_000, 0) + // Minimum execution time: 16_850_000 picoseconds. + Weight::from_parts(17_195_000, 0) .saturating_add(Weight::from_parts(0, 13465)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -299,8 +329,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `13471` - // Minimum execution time: 32_141_000 picoseconds. - Weight::from_parts(32_954_000, 0) + // Minimum execution time: 31_931_000 picoseconds. + Weight::from_parts(32_494_000, 0) .saturating_add(Weight::from_parts(0, 13471)) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) @@ -313,8 +343,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_410_000 picoseconds. - Weight::from_parts(3_556_000, 0) + // Minimum execution time: 3_514_000 picoseconds. + Weight::from_parts(3_709_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -325,8 +355,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 25_021_000 picoseconds. - Weight::from_parts(25_240_000, 0) + // Minimum execution time: 24_863_000 picoseconds. 
+ Weight::from_parts(25_293_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -337,8 +367,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 33_801_000 picoseconds. - Weight::from_parts(34_655_000, 0) + // Minimum execution time: 33_799_000 picoseconds. + Weight::from_parts(34_665_000, 0) .saturating_add(Weight::from_parts(0, 3555)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs index c337289243b..62a9c802808 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -62,8 +62,28 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 17_856_000 picoseconds. - Weight::from_parts(18_473_000, 0) + // Minimum execution time: 17_450_000 picoseconds. + Weight::from_parts(17_913_000, 0) + .saturating_add(Weight::from_parts(0, 3503)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn send_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `38` + // Estimated: `3503` + // Minimum execution time: 18_082_000 picoseconds. + Weight::from_parts(18_293_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -84,8 +104,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 56_112_000 picoseconds. - Weight::from_parts(57_287_000, 0) + // Minimum execution time: 54_939_000 picoseconds. 
+ Weight::from_parts(55_721_000, 0) .saturating_add(Weight::from_parts(0, 3535)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -120,14 +140,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn execute_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_186_000 picoseconds. - Weight::from_parts(6_420_000, 0) + // Minimum execution time: 5_789_000 picoseconds. + Weight::from_parts(5_995_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -137,8 +167,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_824_000 picoseconds. - Weight::from_parts(1_999_000, 0) + // Minimum execution time: 1_795_000 picoseconds. + Weight::from_parts(1_924_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -162,8 +192,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 23_833_000 picoseconds. - Weight::from_parts(24_636_000, 0) + // Minimum execution time: 23_445_000 picoseconds. + Weight::from_parts(23_906_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) @@ -186,8 +216,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `255` // Estimated: `3720` - // Minimum execution time: 26_557_000 picoseconds. - Weight::from_parts(27_275_000, 0) + // Minimum execution time: 26_590_000 picoseconds. + Weight::from_parts(27_056_000, 0) .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -198,8 +228,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_921_000 picoseconds. - Weight::from_parts(2_040_000, 0) + // Minimum execution time: 1_889_000 picoseconds. + Weight::from_parts(1_962_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -209,8 +239,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89` // Estimated: `13454` - // Minimum execution time: 16_832_000 picoseconds. - Weight::from_parts(17_312_000, 0) + // Minimum execution time: 16_408_000 picoseconds. 
+ Weight::from_parts(16_877_000, 0) .saturating_add(Weight::from_parts(0, 13454)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -221,8 +251,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `93` // Estimated: `13458` - // Minimum execution time: 16_687_000 picoseconds. - Weight::from_parts(17_123_000, 0) + // Minimum execution time: 16_791_000 picoseconds. + Weight::from_parts(17_111_000, 0) .saturating_add(Weight::from_parts(0, 13458)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -233,8 +263,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `15946` - // Minimum execution time: 18_164_000 picoseconds. - Weight::from_parts(18_580_000, 0) + // Minimum execution time: 18_355_000 picoseconds. + Weight::from_parts(19_110_000, 0) .saturating_add(Weight::from_parts(0, 15946)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -254,8 +284,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 23_577_000 picoseconds. - Weight::from_parts(24_324_000, 0) + // Minimum execution time: 23_354_000 picoseconds. + Weight::from_parts(23_999_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) @@ -266,8 +296,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `136` // Estimated: `11026` - // Minimum execution time: 11_014_000 picoseconds. - Weight::from_parts(11_223_000, 0) + // Minimum execution time: 11_065_000 picoseconds. + Weight::from_parts(11_302_000, 0) .saturating_add(Weight::from_parts(0, 11026)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -277,8 +307,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `100` // Estimated: `13465` - // Minimum execution time: 16_887_000 picoseconds. - Weight::from_parts(17_361_000, 0) + // Minimum execution time: 16_998_000 picoseconds. + Weight::from_parts(17_509_000, 0) .saturating_add(Weight::from_parts(0, 13465)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -299,8 +329,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `13471` - // Minimum execution time: 31_705_000 picoseconds. - Weight::from_parts(32_166_000, 0) + // Minimum execution time: 31_068_000 picoseconds. + Weight::from_parts(31_978_000, 0) .saturating_add(Weight::from_parts(0, 13471)) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) @@ -313,8 +343,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_568_000 picoseconds. - Weight::from_parts(3_669_000, 0) + // Minimum execution time: 3_478_000 picoseconds. + Weight::from_parts(3_595_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -325,8 +355,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 24_823_000 picoseconds. - Weight::from_parts(25_344_000, 0) + // Minimum execution time: 24_962_000 picoseconds. 
+ Weight::from_parts(25_404_000, 0)
 .saturating_add(Weight::from_parts(0, 11134))
 .saturating_add(T::DbWeight::get().reads(1))
 .saturating_add(T::DbWeight::get().writes(1))
@@ -337,8 +367,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `90`
 // Estimated: `3555`
- // Minimum execution time: 34_516_000 picoseconds.
- Weight::from_parts(35_478_000, 0)
+ // Minimum execution time: 32_685_000 picoseconds.
+ Weight::from_parts(33_592_000, 0)
 .saturating_add(Weight::from_parts(0, 3555))
 .saturating_add(T::DbWeight::get().reads(1))
 .saturating_add(T::DbWeight::get().writes(1))
diff --git a/polkadot/runtime/rococo/src/impls.rs b/polkadot/runtime/rococo/src/impls.rs
index ac7100d7858..cf364b6ac79 100644
--- a/polkadot/runtime/rococo/src/impls.rs
+++ b/polkadot/runtime/rococo/src/impls.rs
@@ -167,11 +167,16 @@ where
 },
 ]);
+ let encoded_versioned_xcm =
+ VersionedXcm::V4(program).encode().try_into().map_err(|error| {
+ log::error!(target: "runtime::on_reap_identity", "XCM too large, error: {:?}", error);
+ pallet_xcm::Error::<Runtime>::XcmTooLarge
+ })?;
 // send
- let _ = <pallet_xcm::Pallet<Runtime>>::send(
+ let _ = <pallet_xcm::Pallet<Runtime>>::send_blob(
 RawOrigin::Root.into(),
 Box::new(VersionedLocation::V4(destination)),
- Box::new(VersionedXcm::V4(program)),
+ encoded_versioned_xcm,
 )?;
 Ok(())
 }
diff --git a/polkadot/runtime/rococo/src/weights/pallet_xcm.rs b/polkadot/runtime/rococo/src/weights/pallet_xcm.rs
index 5544ca44658..42972baa1c8 100644
--- a/polkadot/runtime/rococo/src/weights/pallet_xcm.rs
+++ b/polkadot/runtime/rococo/src/weights/pallet_xcm.rs
@@ -17,9 +17,9 @@
 //! Autogenerated weights for `pallet_xcm`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024
// Executed Command:
@@ -60,8 +60,26 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `180`
 // Estimated: `3645`
- // Minimum execution time: 25_043_000 picoseconds.
- Weight::from_parts(25_682_000, 0)
+ // Minimum execution time: 24_724_000 picoseconds.
+ Weight::from_parts(25_615_000, 0)
+ .saturating_add(Weight::from_parts(0, 3645))
+ .saturating_add(T::DbWeight::get().reads(4))
+ .saturating_add(T::DbWeight::get().writes(2))
+ }
+ /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0)
+ /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0)
+ /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1)
+ /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1)
+ /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ fn send_blob() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `180`
+ // Estimated: `3645`
+ // Minimum execution time: 24_709_000 picoseconds.
+ Weight::from_parts(25_326_000, 0) .saturating_add(Weight::from_parts(0, 3645)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -80,8 +98,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3645` - // Minimum execution time: 107_570_000 picoseconds. - Weight::from_parts(109_878_000, 0) + // Minimum execution time: 106_600_000 picoseconds. + Weight::from_parts(110_781_000, 0) .saturating_add(Weight::from_parts(0, 3645)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) @@ -100,8 +118,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `232` // Estimated: `3697` - // Minimum execution time: 106_341_000 picoseconds. - Weight::from_parts(109_135_000, 0) + // Minimum execution time: 103_030_000 picoseconds. + Weight::from_parts(106_018_000, 0) .saturating_add(Weight::from_parts(0, 3697)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) @@ -120,8 +138,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3645` - // Minimum execution time: 108_372_000 picoseconds. - Weight::from_parts(112_890_000, 0) + // Minimum execution time: 107_017_000 picoseconds. + Weight::from_parts(109_214_000, 0) .saturating_add(Weight::from_parts(0, 3645)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) @@ -130,8 +148,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_957_000 picoseconds. - Weight::from_parts(7_417_000, 0) + // Minimum execution time: 6_864_000 picoseconds. + Weight::from_parts(7_135_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn execute_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_955_000 picoseconds. + Weight::from_parts(7_165_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `XcmPallet::SupportedVersion` (r:0 w:1) @@ -140,8 +166,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_053_000 picoseconds. - Weight::from_parts(7_462_000, 0) + // Minimum execution time: 6_827_000 picoseconds. + Weight::from_parts(7_211_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -149,8 +175,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_918_000 picoseconds. - Weight::from_parts(2_037_000, 0) + // Minimum execution time: 1_788_000 picoseconds. + Weight::from_parts(2_021_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) @@ -171,8 +197,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3645` - // Minimum execution time: 30_417_000 picoseconds. - Weight::from_parts(31_191_000, 0) + // Minimum execution time: 30_627_000 picoseconds. 
+ Weight::from_parts(31_350_000, 0) .saturating_add(Weight::from_parts(0, 3645)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) @@ -193,8 +219,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `360` // Estimated: `3825` - // Minimum execution time: 36_666_000 picoseconds. - Weight::from_parts(37_779_000, 0) + // Minimum execution time: 36_688_000 picoseconds. + Weight::from_parts(37_345_000, 0) .saturating_add(Weight::from_parts(0, 3825)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) @@ -205,8 +231,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_869_000 picoseconds. - Weight::from_parts(2_003_000, 0) + // Minimum execution time: 1_829_000 picoseconds. + Weight::from_parts(1_986_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -216,8 +242,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `22` // Estimated: `13387` - // Minimum execution time: 16_188_000 picoseconds. - Weight::from_parts(16_435_000, 0) + // Minimum execution time: 16_104_000 picoseconds. + Weight::from_parts(16_464_000, 0) .saturating_add(Weight::from_parts(0, 13387)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -228,8 +254,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `26` // Estimated: `13391` - // Minimum execution time: 16_431_000 picoseconds. - Weight::from_parts(16_935_000, 0) + // Minimum execution time: 16_267_000 picoseconds. + Weight::from_parts(16_675_000, 0) .saturating_add(Weight::from_parts(0, 13391)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -240,8 +266,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `40` // Estimated: `15880` - // Minimum execution time: 18_460_000 picoseconds. - Weight::from_parts(18_885_000, 0) + // Minimum execution time: 18_487_000 picoseconds. + Weight::from_parts(19_102_000, 0) .saturating_add(Weight::from_parts(0, 15880)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -259,8 +285,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `216` // Estimated: `6156` - // Minimum execution time: 29_623_000 picoseconds. - Weight::from_parts(30_661_000, 0) + // Minimum execution time: 29_603_000 picoseconds. + Weight::from_parts(31_002_000, 0) .saturating_add(Weight::from_parts(0, 6156)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) @@ -271,8 +297,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `69` // Estimated: `10959` - // Minimum execution time: 12_043_000 picoseconds. - Weight::from_parts(12_360_000, 0) + // Minimum execution time: 12_183_000 picoseconds. + Weight::from_parts(12_587_000, 0) .saturating_add(Weight::from_parts(0, 10959)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -282,8 +308,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `33` // Estimated: `13398` - // Minimum execution time: 16_511_000 picoseconds. - Weight::from_parts(17_011_000, 0) + // Minimum execution time: 16_372_000 picoseconds. 
+ Weight::from_parts(16_967_000, 0)
 .saturating_add(Weight::from_parts(0, 13398))
 .saturating_add(T::DbWeight::get().reads(5))
 .saturating_add(T::DbWeight::get().writes(2))
@@ -302,8 +328,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `216`
 // Estimated: `13581`
- // Minimum execution time: 39_041_000 picoseconds.
- Weight::from_parts(39_883_000, 0)
+ // Minimum execution time: 38_904_000 picoseconds.
+ Weight::from_parts(39_983_000, 0)
 .saturating_add(Weight::from_parts(0, 13581))
 .saturating_add(T::DbWeight::get().reads(9))
 .saturating_add(T::DbWeight::get().writes(4))
@@ -316,8 +342,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `1485`
- // Minimum execution time: 2_030_000 picoseconds.
- Weight::from_parts(2_150_000, 0)
+ // Minimum execution time: 2_067_000 picoseconds.
+ Weight::from_parts(2_195_000, 0)
 .saturating_add(Weight::from_parts(0, 1485))
 .saturating_add(T::DbWeight::get().reads(1))
 .saturating_add(T::DbWeight::get().writes(2))
@@ -328,8 +354,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `7576`
 // Estimated: `11041`
- // Minimum execution time: 22_615_000 picoseconds.
- Weight::from_parts(23_008_000, 0)
+ // Minimum execution time: 23_982_000 picoseconds.
+ Weight::from_parts(24_409_000, 0)
 .saturating_add(Weight::from_parts(0, 11041))
 .saturating_add(T::DbWeight::get().reads(1))
 .saturating_add(T::DbWeight::get().writes(1))
@@ -340,8 +366,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `23`
 // Estimated: `3488`
- // Minimum execution time: 34_438_000 picoseconds.
- Weight::from_parts(35_514_000, 0)
+ // Minimum execution time: 33_430_000 picoseconds.
+ Weight::from_parts(34_433_000, 0)
 .saturating_add(Weight::from_parts(0, 3488))
 .saturating_add(T::DbWeight::get().reads(1))
 .saturating_add(T::DbWeight::get().writes(1))
diff --git a/polkadot/runtime/westend/src/impls.rs b/polkadot/runtime/westend/src/impls.rs
index 71e6b696a20..d8741c939a5 100644
--- a/polkadot/runtime/westend/src/impls.rs
+++ b/polkadot/runtime/westend/src/impls.rs
@@ -167,11 +167,16 @@ where
 },
 ]);
+ let encoded_versioned_xcm =
+ VersionedXcm::V4(program).encode().try_into().map_err(|error| {
+ log::error!(target: "runtime::on_reap_identity", "XCM too large, error: {:?}", error);
+ pallet_xcm::Error::<Runtime>::XcmTooLarge
+ })?;
 // send
- let _ = <pallet_xcm::Pallet<Runtime>>::send(
+ let _ = <pallet_xcm::Pallet<Runtime>>::send_blob(
 RawOrigin::Root.into(),
 Box::new(VersionedLocation::V4(destination)),
- Box::new(VersionedXcm::V4(program)),
+ encoded_versioned_xcm,
 )?;
 Ok(())
 }
diff --git a/polkadot/runtime/westend/src/weights/pallet_xcm.rs b/polkadot/runtime/westend/src/weights/pallet_xcm.rs
index 10725cecf24..80bc551ba1e 100644
--- a/polkadot/runtime/westend/src/weights/pallet_xcm.rs
+++ b/polkadot/runtime/westend/src/weights/pallet_xcm.rs
@@ -17,9 +17,9 @@
 //! Autogenerated weights for `pallet_xcm`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //!
WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -60,8 +60,26 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `147` // Estimated: `3612` - // Minimum execution time: 25_725_000 picoseconds. - Weight::from_parts(26_174_000, 0) + // Minimum execution time: 24_535_000 picoseconds. + Weight::from_parts(25_618_000, 0) + .saturating_add(Weight::from_parts(0, 3612)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn send_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `147` + // Estimated: `3612` + // Minimum execution time: 25_376_000 picoseconds. + Weight::from_parts(26_180_000, 0) .saturating_add(Weight::from_parts(0, 3612)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -80,8 +98,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `250` // Estimated: `6196` - // Minimum execution time: 113_140_000 picoseconds. - Weight::from_parts(116_204_000, 0) + // Minimum execution time: 108_786_000 picoseconds. + Weight::from_parts(112_208_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -100,8 +118,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `302` // Estimated: `6196` - // Minimum execution time: 108_571_000 picoseconds. - Weight::from_parts(110_650_000, 0) + // Minimum execution time: 105_190_000 picoseconds. + Weight::from_parts(107_140_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -120,8 +138,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `250` // Estimated: `6196` - // Minimum execution time: 111_836_000 picoseconds. - Weight::from_parts(114_435_000, 0) + // Minimum execution time: 109_027_000 picoseconds. + Weight::from_parts(111_404_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -136,14 +154,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn execute_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. 
+ Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } /// Storage: `XcmPallet::SupportedVersion` (r:0 w:1) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_160_000 picoseconds. - Weight::from_parts(7_477_000, 0) + // Minimum execution time: 6_668_000 picoseconds. + Weight::from_parts(7_013_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -151,8 +179,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_934_000 picoseconds. - Weight::from_parts(2_053_000, 0) + // Minimum execution time: 1_740_000 picoseconds. + Weight::from_parts(1_884_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) @@ -173,8 +201,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `147` // Estimated: `3612` - // Minimum execution time: 31_123_000 picoseconds. - Weight::from_parts(31_798_000, 0) + // Minimum execution time: 30_200_000 picoseconds. + Weight::from_parts(30_768_000, 0) .saturating_add(Weight::from_parts(0, 3612)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) @@ -195,8 +223,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `327` // Estimated: `3792` - // Minimum execution time: 35_175_000 picoseconds. - Weight::from_parts(36_098_000, 0) + // Minimum execution time: 33_928_000 picoseconds. + Weight::from_parts(35_551_000, 0) .saturating_add(Weight::from_parts(0, 3792)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) @@ -207,8 +235,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_974_000 picoseconds. - Weight::from_parts(2_096_000, 0) + // Minimum execution time: 1_759_000 picoseconds. + Weight::from_parts(1_880_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -218,8 +246,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `22` // Estimated: `13387` - // Minimum execution time: 16_626_000 picoseconds. - Weight::from_parts(17_170_000, 0) + // Minimum execution time: 16_507_000 picoseconds. + Weight::from_parts(17_219_000, 0) .saturating_add(Weight::from_parts(0, 13387)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -230,8 +258,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `26` // Estimated: `13391` - // Minimum execution time: 16_937_000 picoseconds. - Weight::from_parts(17_447_000, 0) + // Minimum execution time: 16_633_000 picoseconds. + Weight::from_parts(16_889_000, 0) .saturating_add(Weight::from_parts(0, 13391)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -242,8 +270,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `40` // Estimated: `15880` - // Minimum execution time: 19_157_000 picoseconds. - Weight::from_parts(19_659_000, 0) + // Minimum execution time: 19_297_000 picoseconds. 
+ Weight::from_parts(19_820_000, 0) .saturating_add(Weight::from_parts(0, 15880)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -261,8 +289,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `183` // Estimated: `6123` - // Minimum execution time: 30_699_000 picoseconds. - Weight::from_parts(31_537_000, 0) + // Minimum execution time: 30_364_000 picoseconds. + Weight::from_parts(31_122_000, 0) .saturating_add(Weight::from_parts(0, 6123)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) @@ -273,8 +301,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `69` // Estimated: `10959` - // Minimum execution time: 12_303_000 picoseconds. - Weight::from_parts(12_670_000, 0) + // Minimum execution time: 11_997_000 picoseconds. + Weight::from_parts(12_392_000, 0) .saturating_add(Weight::from_parts(0, 10959)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -284,8 +312,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `33` // Estimated: `13398` - // Minimum execution time: 17_129_000 picoseconds. - Weight::from_parts(17_668_000, 0) + // Minimum execution time: 16_894_000 picoseconds. + Weight::from_parts(17_452_000, 0) .saturating_add(Weight::from_parts(0, 13398)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -304,8 +332,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `183` // Estimated: `13548` - // Minimum execution time: 39_960_000 picoseconds. - Weight::from_parts(41_068_000, 0) + // Minimum execution time: 39_864_000 picoseconds. + Weight::from_parts(40_859_000, 0) .saturating_add(Weight::from_parts(0, 13548)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) @@ -318,8 +346,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 2_333_000 picoseconds. - Weight::from_parts(2_504_000, 0) + // Minimum execution time: 2_363_000 picoseconds. + Weight::from_parts(2_519_000, 0) .saturating_add(Weight::from_parts(0, 1485)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -330,8 +358,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7576` // Estimated: `11041` - // Minimum execution time: 22_932_000 picoseconds. - Weight::from_parts(23_307_000, 0) + // Minimum execution time: 22_409_000 picoseconds. + Weight::from_parts(22_776_000, 0) .saturating_add(Weight::from_parts(0, 11041)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -342,8 +370,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `23` // Estimated: `3488` - // Minimum execution time: 34_558_000 picoseconds. - Weight::from_parts(35_299_000, 0) + // Minimum execution time: 33_551_000 picoseconds. 
+ Weight::from_parts(34_127_000, 0)
 .saturating_add(Weight::from_parts(0, 3488))
 .saturating_add(T::DbWeight::get().reads(1))
 .saturating_add(T::DbWeight::get().writes(1))
diff --git a/polkadot/xcm/pallet-xcm/src/benchmarking.rs b/polkadot/xcm/pallet-xcm/src/benchmarking.rs
index ed42f93692b..e2903d592dc 100644
--- a/polkadot/xcm/pallet-xcm/src/benchmarking.rs
+++ b/polkadot/xcm/pallet-xcm/src/benchmarking.rs
@@ -16,6 +16,7 @@
 use super::*;
 use bounded_collections::{ConstU32, WeakBoundedVec};
+use codec::Encode;
 use frame_benchmarking::{benchmarks, whitelisted_caller, BenchmarkError, BenchmarkResult};
 use frame_support::{
 traits::fungible::{Inspect, Mutate},
@@ -108,6 +109,21 @@ benchmarks! {
 let versioned_msg = VersionedXcm::from(msg);
 }: _<RuntimeOrigin<T>>(send_origin, Box::new(versioned_dest), Box::new(versioned_msg))
+ send_blob {
+ let send_origin =
+ T::SendXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?;
+ if T::SendXcmOrigin::try_origin(send_origin.clone()).is_err() {
+ return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))
+ }
+ let msg = Xcm::<()>(vec![ClearOrigin]);
+ let versioned_dest: VersionedLocation = T::reachable_dest().ok_or(
+ BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)),
+ )?
+ .into();
+ let versioned_msg = VersionedXcm::from(msg);
+ let encoded_versioned_msg = versioned_msg.encode().try_into().unwrap();
+ }: _<RuntimeOrigin<T>>(send_origin, Box::new(versioned_dest), encoded_versioned_msg)
+
 teleport_assets {
 let (asset, destination) = T::teleportable_asset_and_dest().ok_or(
 BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)),
@@ -227,6 +243,19 @@ benchmarks! {
 let versioned_msg = VersionedXcm::from(msg);
 }: _<RuntimeOrigin<T>>(execute_origin, Box::new(versioned_msg), Weight::MAX)
+ execute_blob {
+ let execute_origin =
+ T::ExecuteXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?;
+ let origin_location = T::ExecuteXcmOrigin::try_origin(execute_origin.clone())
+ .map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?;
+ let msg = Xcm(vec![ClearOrigin]);
+ if !T::XcmExecuteFilter::contains(&(origin_location, msg.clone())) {
+ return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))
+ }
+ let versioned_msg = VersionedXcm::from(msg);
+ let encoded_versioned_msg = versioned_msg.encode().try_into().unwrap();
+ }: _<RuntimeOrigin<T>>(execute_origin, encoded_versioned_msg, Weight::MAX)
+
 force_xcm_version {
 let loc = T::reachable_dest().ok_or(
 BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)),
diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs
index 58a597de5ab..29b61988f73 100644
--- a/polkadot/xcm/pallet-xcm/src/lib.rs
+++ b/polkadot/xcm/pallet-xcm/src/lib.rs
@@ -50,8 +50,8 @@ use sp_runtime::{
 use sp_std::{boxed::Box, marker::PhantomData, prelude::*, result::Result, vec};
 use xcm::{latest::QueryResponseInfo, prelude::*};
 use xcm_builder::{
- ExecuteController, ExecuteControllerWeightInfo, QueryController, QueryControllerWeightInfo,
- SendController, SendControllerWeightInfo,
+ ExecuteController, ExecuteControllerWeightInfo, MaxXcmEncodedSize, QueryController,
+ QueryControllerWeightInfo, SendController, SendControllerWeightInfo,
 };
 use xcm_executor::{
 traits::{
@@ -87,6 +87,8 @@ pub trait WeightInfo {
 fn new_query() -> Weight;
 fn take_response() -> Weight;
 fn claim_assets() -> Weight;
+ fn execute_blob() -> Weight;
+ fn send_blob() -> Weight;
 }
 /// fallback implementation
@@ -171,6 +173,14 @@ impl WeightInfo for TestWeightInfo {
 fn claim_assets() -> Weight {
 Weight::from_parts(100_000_000, 0)
 }
+
+ fn execute_blob() -> Weight {
+ Weight::from_parts(100_000_000, 0)
+ }
+
+ fn send_blob() -> Weight {
+ Weight::from_parts(100_000_000, 0)
+ }
 }
 #[frame_support::pallet]
@@ -286,76 +296,49 @@ pub mod pallet {
 }
 impl<T: Config> ExecuteControllerWeightInfo for Pallet<T> {
- fn execute() -> Weight {
- T::WeightInfo::execute()
+ fn execute_blob() -> Weight {
+ T::WeightInfo::execute_blob()
 }
 }
 impl<T: Config> ExecuteController<OriginFor<T>, <T as Config>::RuntimeCall> for Pallet<T> {
 type WeightInfo = Self;
- fn execute(
+ fn execute_blob(
 origin: OriginFor<T>,
- message: Box<VersionedXcm<<T as Config>::RuntimeCall>>,
+ encoded_message: BoundedVec<u8, MaxXcmEncodedSize>,
 max_weight: Weight,
 ) -> Result<Weight, DispatchErrorWithPostInfo> {
- log::trace!(target: "xcm::pallet_xcm::execute", "message {:?}, max_weight {:?}", message, max_weight);
- let outcome = (|| {
- let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?;
- let mut hash = message.using_encoded(sp_io::hashing::blake2_256);
- let message = (*message).try_into().map_err(|()| Error::<T>::BadVersion)?;
- let value = (origin_location, message);
- ensure!(T::XcmExecuteFilter::contains(&value), Error::<T>::Filtered);
- let (origin_location, message) = value;
- Ok(T::XcmExecutor::prepare_and_execute(
- origin_location,
- message,
- &mut hash,
- max_weight,
- max_weight,
- ))
- })()
- .map_err(|e: DispatchError| {
- e.with_weight(<Self::WeightInfo as ExecuteControllerWeightInfo>::execute())
- })?;
-
- Self::deposit_event(Event::Attempted { outcome: outcome.clone() });
- let weight_used = outcome.weight_used();
- outcome.ensure_complete().map_err(|error| {
- log::error!(target: "xcm::pallet_xcm::execute", "XCM execution failed with error {:?}", error);
- Error::<T>::LocalExecutionIncomplete.with_weight(
- weight_used.saturating_add(
- <Self::WeightInfo as ExecuteControllerWeightInfo>::execute(),
- ),
- )
- })?;
- Ok(weight_used)
+ let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?;
+ let message =
+ VersionedXcm::<<T as Config>::RuntimeCall>::decode(&mut &encoded_message[..])
+ .map_err(|error| {
+ log::error!(target: "xcm::execute_blob", "Unable to decode XCM, error: {:?}", error);
+ Error::<T>::UnableToDecode
+ })?;
+ Self::execute_base(origin_location, Box::new(message), max_weight)
 }
 }
 impl<T: Config> SendControllerWeightInfo for Pallet<T> {
- fn send() -> Weight {
- T::WeightInfo::send()
+ fn send_blob() -> Weight {
+ T::WeightInfo::send_blob()
 }
 }
 impl<T: Config> SendController<OriginFor<T>> for Pallet<T> {
 type WeightInfo = Self;
- fn send(
+ fn send_blob(
 origin: OriginFor<T>,
 dest: Box<VersionedLocation>,
- message: Box<VersionedXcm<()>>,
+ encoded_message: BoundedVec<u8, MaxXcmEncodedSize>,
 ) -> Result<XcmHash, DispatchError> {
 let origin_location = T::SendXcmOrigin::ensure_origin(origin)?;
- let interior: Junctions =
- origin_location.clone().try_into().map_err(|_| Error::<T>::InvalidOrigin)?;
- let dest = Location::try_from(*dest).map_err(|()| Error::<T>::BadVersion)?;
- let message: Xcm<()> = (*message).try_into().map_err(|()| Error::<T>::BadVersion)?;
-
- let message_id = Self::send_xcm(interior, dest.clone(), message.clone())
- .map_err(Error::<T>::from)?;
- let e = Event::Sent { origin: origin_location, destination: dest, message, message_id };
- Self::deposit_event(e);
- Ok(message_id)
+ let message =
+ VersionedXcm::<()>::decode(&mut &encoded_message[..]).map_err(|error| {
+ log::error!(target: "xcm::send_blob", "Unable to decode XCM, error: {:?}", error);
+ Error::<T>::UnableToDecode
+ })?;
+ Self::send_base(origin_location, dest, Box::new(message))
 }
 }
@@ -562,6 +545,11 @@ pub mod pallet {
 TooManyReserves,
 /// Local XCM execution incomplete.
 LocalExecutionIncomplete,
+ /// Could not decode XCM.
+ UnableToDecode,
+ /// XCM encoded length is too large.
+ /// Returned when an XCM encoded length is larger than `MaxXcmEncodedSize`. + XcmTooLarge, } impl From for Error { @@ -899,8 +887,64 @@ pub mod pallet { } } + impl Pallet { + /// Underlying logic for both [`execute_blob`] and [`execute`]. + fn execute_base( + origin_location: Location, + message: Box::RuntimeCall>>, + max_weight: Weight, + ) -> Result { + log::trace!(target: "xcm::pallet_xcm::execute", "message {:?}, max_weight {:?}", message, max_weight); + let outcome = (|| { + let mut hash = message.using_encoded(sp_io::hashing::blake2_256); + let message = (*message).try_into().map_err(|()| Error::::BadVersion)?; + let value = (origin_location, message); + ensure!(T::XcmExecuteFilter::contains(&value), Error::::Filtered); + let (origin_location, message) = value; + Ok(T::XcmExecutor::prepare_and_execute( + origin_location, + message, + &mut hash, + max_weight, + max_weight, + )) + })() + .map_err(|e: DispatchError| e.with_weight(T::WeightInfo::execute()))?; + + Self::deposit_event(Event::Attempted { outcome: outcome.clone() }); + let weight_used = outcome.weight_used(); + outcome.ensure_complete().map_err(|error| { + log::error!(target: "xcm::pallet_xcm::execute", "XCM execution failed with error {:?}", error); + Error::::LocalExecutionIncomplete + .with_weight(weight_used.saturating_add(T::WeightInfo::execute())) + })?; + Ok(weight_used) + } + + /// Underlying logic for both [`send_blob`] and [`send`]. + fn send_base( + origin_location: Location, + dest: Box, + message: Box>, + ) -> Result { + let interior: Junctions = + origin_location.clone().try_into().map_err(|_| Error::::InvalidOrigin)?; + let dest = Location::try_from(*dest).map_err(|()| Error::::BadVersion)?; + let message: Xcm<()> = (*message).try_into().map_err(|()| Error::::BadVersion)?; + + let message_id = Self::send_xcm(interior, dest.clone(), message.clone()) + .map_err(Error::::from)?; + let e = Event::Sent { origin: origin_location, destination: dest, message, message_id }; + Self::deposit_event(e); + Ok(message_id) + } + } + #[pallet::call] impl Pallet { + /// WARNING: DEPRECATED. `send` will be removed after June 2024. Use `send_blob` instead. + #[allow(deprecated)] + #[deprecated(note = "`send` will be removed after June 2024. Use `send_blob` instead.")] #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::send())] pub fn send( @@ -908,7 +952,8 @@ pub mod pallet { dest: Box, message: Box>, ) -> DispatchResult { - >::send(origin, dest, message)?; + let origin_location = T::SendXcmOrigin::ensure_origin(origin)?; + Self::send_base(origin_location, dest, message)?; Ok(()) } @@ -1031,6 +1076,13 @@ pub mod pallet { /// No more than `max_weight` will be used in its attempted execution. If this is less than /// the maximum amount of weight that the message could take to be executed, then no /// execution attempt will be made. + /// + /// WARNING: DEPRECATED. `execute` will be removed after June 2024. Use `execute_blob` + /// instead. + #[allow(deprecated)] + #[deprecated( + note = "`execute` will be removed after June 2024. Use `execute_blob` instead." 
+ )] #[pallet::call_index(3)] #[pallet::weight(max_weight.saturating_add(T::WeightInfo::execute()))] pub fn execute( @@ -1038,8 +1090,8 @@ pub mod pallet { message: Box::RuntimeCall>>, max_weight: Weight, ) -> DispatchResultWithPostInfo { - let weight_used = - >::execute(origin, message, max_weight)?; + let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; + let weight_used = Self::execute_base(origin_location, message, max_weight)?; Ok(Some(weight_used.saturating_add(T::WeightInfo::execute())).into()) } @@ -1450,6 +1502,48 @@ pub mod pallet { })?; Ok(()) } + + /// Execute an XCM from a local, signed, origin. + /// + /// An event is deposited indicating whether the message could be executed completely + /// or only partially. + /// + /// No more than `max_weight` will be used in its attempted execution. If this is less than + /// the maximum amount of weight that the message could take to be executed, then no + /// execution attempt will be made. + /// + /// The message is passed in encoded. It needs to be decodable as a [`VersionedXcm`]. + #[pallet::call_index(13)] + #[pallet::weight(T::WeightInfo::execute_blob())] + pub fn execute_blob( + origin: OriginFor, + encoded_message: BoundedVec, + max_weight: Weight, + ) -> DispatchResultWithPostInfo { + let weight_used = >::execute_blob( + origin, + encoded_message, + max_weight, + )?; + Ok(Some(weight_used.saturating_add(T::WeightInfo::execute_blob())).into()) + } + + /// Send an XCM from a local, signed, origin. + /// + /// The destination, `dest`, will receive this message with a `DescendOrigin` instruction + /// that makes the origin of the message be the origin on this system. + /// + /// The message is passed in encoded. It needs to be decodable as a [`VersionedXcm`]. + #[pallet::call_index(14)] + #[pallet::weight(T::WeightInfo::send_blob())] + pub fn send_blob( + origin: OriginFor, + dest: Box, + encoded_message: BoundedVec, + ) -> DispatchResult { + >::send_blob(origin, dest, encoded_message)?; + Ok(()) + } } } diff --git a/polkadot/xcm/pallet-xcm/src/tests/mod.rs b/polkadot/xcm/pallet-xcm/src/tests/mod.rs index 13022d9a8b1..763d768e154 100644 --- a/polkadot/xcm/pallet-xcm/src/tests/mod.rs +++ b/polkadot/xcm/pallet-xcm/src/tests/mod.rs @@ -20,10 +20,10 @@ pub(crate) mod assets_transfer; use crate::{ mock::*, pallet::SupportedVersion, AssetTraps, Config, CurrentMigration, Error, - ExecuteControllerWeightInfo, LatestVersionedLocation, Pallet, Queries, QueryStatus, - VersionDiscoveryQueue, VersionMigrationStage, VersionNotifiers, VersionNotifyTargets, - WeightInfo, + LatestVersionedLocation, Pallet, Queries, QueryStatus, VersionDiscoveryQueue, + VersionMigrationStage, VersionNotifiers, VersionNotifyTargets, WeightInfo, }; +use codec::Encode; use frame_support::{ assert_err_ignore_postinfo, assert_noop, assert_ok, traits::{Currency, Hooks}, @@ -305,11 +305,12 @@ fn send_works() { ]); let versioned_dest = Box::new(RelayLocation::get().into()); - let versioned_message = Box::new(VersionedXcm::from(message.clone())); - assert_ok!(XcmPallet::send( + let versioned_message = VersionedXcm::from(message.clone()); + let encoded_versioned_message = versioned_message.encode().try_into().unwrap(); + assert_ok!(XcmPallet::send_blob( RuntimeOrigin::signed(ALICE), versioned_dest, - versioned_message + encoded_versioned_message )); let sent_message = Xcm(Some(DescendOrigin(sender.clone().try_into().unwrap())) .into_iter() @@ -341,16 +342,16 @@ fn send_fails_when_xcm_router_blocks() { ]; new_test_ext_with_balances(balances).execute_with(|| { 
let sender: Location = Junction::AccountId32 { network: None, id: ALICE.into() }.into(); - let message = Xcm(vec![ + let message = Xcm::<()>(vec![ ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), buy_execution((Parent, SEND_AMOUNT)), DepositAsset { assets: AllCounted(1).into(), beneficiary: sender }, ]); assert_noop!( - XcmPallet::send( + XcmPallet::send_blob( RuntimeOrigin::signed(ALICE), Box::new(Location::ancestor(8).into()), - Box::new(VersionedXcm::from(message.clone())), + VersionedXcm::from(message.clone()).encode().try_into().unwrap(), ), crate::Error::::SendFailure ); @@ -371,13 +372,16 @@ fn execute_withdraw_to_deposit_works() { let weight = BaseXcmWeight::get() * 3; let dest: Location = Junction::AccountId32 { network: None, id: BOB.into() }.into(); assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); - assert_ok!(XcmPallet::execute( + assert_ok!(XcmPallet::execute_blob( RuntimeOrigin::signed(ALICE), - Box::new(VersionedXcm::from(Xcm(vec![ + VersionedXcm::from(Xcm::(vec![ WithdrawAsset((Here, SEND_AMOUNT).into()), buy_execution((Here, SEND_AMOUNT)), DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]))), + ])) + .encode() + .try_into() + .unwrap(), weight )); assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); @@ -399,18 +403,21 @@ fn trapped_assets_can_be_claimed() { let weight = BaseXcmWeight::get() * 6; let dest: Location = Junction::AccountId32 { network: None, id: BOB.into() }.into(); - assert_ok!(XcmPallet::execute( + assert_ok!(XcmPallet::execute_blob( RuntimeOrigin::signed(ALICE), - Box::new(VersionedXcm::from(Xcm(vec![ + VersionedXcm::from(Xcm(vec![ WithdrawAsset((Here, SEND_AMOUNT).into()), buy_execution((Here, SEND_AMOUNT)), // Don't propagated the error into the result. - SetErrorHandler(Xcm(vec![ClearError])), + SetErrorHandler(Xcm::(vec![ClearError])), // This will make an error. Trap(0), // This would succeed, but we never get to it. DepositAsset { assets: AllCounted(1).into(), beneficiary: dest.clone() }, - ]))), + ])) + .encode() + .try_into() + .unwrap(), weight )); let source: Location = Junction::AccountId32 { network: None, id: ALICE.into() }.into(); @@ -437,13 +444,16 @@ fn trapped_assets_can_be_claimed() { assert_eq!(trapped, expected); let weight = BaseXcmWeight::get() * 3; - assert_ok!(XcmPallet::execute( + assert_ok!(XcmPallet::execute_blob( RuntimeOrigin::signed(ALICE), - Box::new(VersionedXcm::from(Xcm(vec![ + VersionedXcm::from(Xcm::(vec![ ClaimAsset { assets: (Here, SEND_AMOUNT).into(), ticket: Here.into() }, buy_execution((Here, SEND_AMOUNT)), DepositAsset { assets: AllCounted(1).into(), beneficiary: dest.clone() }, - ]))), + ])) + .encode() + .try_into() + .unwrap(), weight )); @@ -453,13 +463,16 @@ fn trapped_assets_can_be_claimed() { // Can't claim twice. assert_err_ignore_postinfo!( - XcmPallet::execute( + XcmPallet::execute_blob( RuntimeOrigin::signed(ALICE), - Box::new(VersionedXcm::from(Xcm(vec![ + VersionedXcm::from(Xcm::(vec![ ClaimAsset { assets: (Here, SEND_AMOUNT).into(), ticket: Here.into() }, buy_execution((Here, SEND_AMOUNT)), DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]))), + ])) + .encode() + .try_into() + .unwrap(), weight ), Error::::LocalExecutionIncomplete @@ -473,12 +486,13 @@ fn claim_assets_works() { let balances = vec![(ALICE, INITIAL_BALANCE)]; new_test_ext_with_balances(balances).execute_with(|| { // First trap some assets. 
- let trapping_program = - Xcm::builder_unsafe().withdraw_asset((Here, SEND_AMOUNT).into()).build(); + let trapping_program = Xcm::::builder_unsafe() + .withdraw_asset((Here, SEND_AMOUNT).into()) + .build(); // Even though assets are trapped, the extrinsic returns success. - assert_ok!(XcmPallet::execute( + assert_ok!(XcmPallet::execute_blob( RuntimeOrigin::signed(ALICE), - Box::new(VersionedXcm::V4(trapping_program)), + VersionedXcm::V4(trapping_program).encode().try_into().unwrap(), BaseXcmWeight::get() * 2, )); assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); @@ -531,9 +545,9 @@ fn incomplete_execute_reverts_side_effects() { assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); let amount_to_send = INITIAL_BALANCE - ExistentialDeposit::get(); let assets: Assets = (Here, amount_to_send).into(); - let result = XcmPallet::execute( + let result = XcmPallet::execute_blob( RuntimeOrigin::signed(ALICE), - Box::new(VersionedXcm::from(Xcm(vec![ + VersionedXcm::from(Xcm::(vec![ // Withdraw + BuyExec + Deposit should work WithdrawAsset(assets.clone()), buy_execution(assets.inner()[0].clone()), @@ -541,7 +555,10 @@ fn incomplete_execute_reverts_side_effects() { // Withdrawing once more will fail because of InsufficientBalance, and we expect to // revert the effects of the above instructions as well WithdrawAsset(assets), - ]))), + ])) + .encode() + .try_into() + .unwrap(), weight, ); // all effects are reverted and balances unchanged for either sender or receiver @@ -553,7 +570,7 @@ fn incomplete_execute_reverts_side_effects() { Err(sp_runtime::DispatchErrorWithPostInfo { post_info: frame_support::dispatch::PostDispatchInfo { actual_weight: Some( - as ExecuteControllerWeightInfo>::execute() + weight + <::WeightInfo>::execute_blob() + weight ), pays_fee: frame_support::dispatch::Pays::Yes, }, diff --git a/polkadot/xcm/src/lib.rs b/polkadot/xcm/src/lib.rs index 86a17fa1e88..e836486e86e 100644 --- a/polkadot/xcm/src/lib.rs +++ b/polkadot/xcm/src/lib.rs @@ -48,6 +48,9 @@ mod tests; /// Maximum nesting level for XCM decoding. pub const MAX_XCM_DECODE_DEPTH: u32 = 8; +/// Maximum encoded size. +/// See `decoding_respects_limit` test for more reasoning behind this value. +pub const MAX_XCM_ENCODED_SIZE: u32 = 12402; /// A version of XCM. 
pub type Version = u32; diff --git a/polkadot/xcm/src/v4/mod.rs b/polkadot/xcm/src/v4/mod.rs index 30ee485589a..6635408282e 100644 --- a/polkadot/xcm/src/v4/mod.rs +++ b/polkadot/xcm/src/v4/mod.rs @@ -1488,7 +1488,21 @@ mod tests { let encoded = big_xcm.encode(); assert!(Xcm::<()>::decode(&mut &encoded[..]).is_err()); - let nested_xcm = Xcm::<()>(vec![ + let mut many_assets = Assets::new(); + for index in 0..MAX_ITEMS_IN_ASSETS { + many_assets.push((GeneralIndex(index as u128), 1u128).into()); + } + + let full_xcm_pass = + Xcm::<()>(vec![ + TransferAsset { assets: many_assets, beneficiary: Here.into() }; + MAX_INSTRUCTIONS_TO_DECODE as usize + ]); + let encoded = full_xcm_pass.encode(); + assert_eq!(encoded.len(), 12402); + assert!(Xcm::<()>::decode(&mut &encoded[..]).is_ok()); + + let nested_xcm_fail = Xcm::<()>(vec![ DepositReserveAsset { assets: All.into(), dest: Here.into(), @@ -1496,10 +1510,10 @@ mod tests { }; (MAX_INSTRUCTIONS_TO_DECODE / 2) as usize ]); - let encoded = nested_xcm.encode(); + let encoded = nested_xcm_fail.encode(); assert!(Xcm::<()>::decode(&mut &encoded[..]).is_err()); - let even_more_nested_xcm = Xcm::<()>(vec![SetAppendix(nested_xcm); 64]); + let even_more_nested_xcm = Xcm::<()>(vec![SetAppendix(nested_xcm_fail); 64]); let encoded = even_more_nested_xcm.encode(); assert_eq!(encoded.len(), 342530); // This should not decode since the limit is 100 diff --git a/polkadot/xcm/xcm-builder/src/controller.rs b/polkadot/xcm/xcm-builder/src/controller.rs index 04b19eaa587..6bdde2a967d 100644 --- a/polkadot/xcm/xcm-builder/src/controller.rs +++ b/polkadot/xcm/xcm-builder/src/controller.rs @@ -21,6 +21,7 @@ use frame_support::{ dispatch::{DispatchErrorWithPostInfo, WithPostDispatchInfo}, pallet_prelude::DispatchError, + parameter_types, BoundedVec, }; use sp_std::boxed::Box; use xcm::prelude::*; @@ -41,8 +42,12 @@ impl Controller f /// Weight functions needed for [`ExecuteController`]. pub trait ExecuteControllerWeightInfo { - /// Weight for [`ExecuteController::execute`] - fn execute() -> Weight; + /// Weight for [`ExecuteController::execute_blob`] + fn execute_blob() -> Weight; +} + +parameter_types! { + pub const MaxXcmEncodedSize: u32 = xcm::MAX_XCM_ENCODED_SIZE; } /// Execute an XCM locally, for a given origin. @@ -61,19 +66,19 @@ pub trait ExecuteController { /// # Parameters /// /// - `origin`: the origin of the call. - /// - `message`: the XCM program to be executed. + /// - `msg`: the encoded XCM to be executed, should be decodable as a [`VersionedXcm`] /// - `max_weight`: the maximum weight that can be consumed by the execution. - fn execute( + fn execute_blob( origin: Origin, - message: Box>, + message: BoundedVec, max_weight: Weight, ) -> Result; } /// Weight functions needed for [`SendController`]. pub trait SendControllerWeightInfo { - /// Weight for [`SendController::send`] - fn send() -> Weight; + /// Weight for [`SendController::send_blob`] + fn send_blob() -> Weight; } /// Send an XCM from a given origin. @@ -93,11 +98,11 @@ pub trait SendController { /// /// - `origin`: the origin of the call. /// - `dest`: the destination of the message. - /// - `msg`: the XCM to be sent. 
-	fn send(
+	/// - `msg`: the encoded XCM to be sent, should be decodable as a [`VersionedXcm`]
+	fn send_blob(
 		origin: Origin,
 		dest: Box,
-		message: Box>,
+		message: BoundedVec,
 	) -> Result;
 }
@@ -137,35 +142,35 @@ pub trait QueryController: QueryHandler {
 impl ExecuteController for () {
 	type WeightInfo = ();
-	fn execute(
+	fn execute_blob(
 		_origin: Origin,
-		_message: Box>,
+		_message: BoundedVec,
 		_max_weight: Weight,
 	) -> Result {
-		Err(DispatchError::Other("ExecuteController::execute not implemented")
+		Err(DispatchError::Other("ExecuteController::execute_blob not implemented")
 			.with_weight(Weight::zero()))
 	}
 }
 impl ExecuteControllerWeightInfo for () {
-	fn execute() -> Weight {
+	fn execute_blob() -> Weight {
 		Weight::zero()
 	}
 }
 impl SendController for () {
 	type WeightInfo = ();
-	fn send(
+	fn send_blob(
 		_origin: Origin,
 		_dest: Box,
-		_message: Box>,
+		_message: BoundedVec,
 	) -> Result {
 		Ok(Default::default())
 	}
 }
 impl SendControllerWeightInfo for () {
-	fn send() -> Weight {
+	fn send_blob() -> Weight {
 		Weight::zero()
 	}
 }
diff --git a/polkadot/xcm/xcm-builder/src/lib.rs b/polkadot/xcm/xcm-builder/src/lib.rs
index e2af8187136..46d0ad227bf 100644
--- a/polkadot/xcm/xcm-builder/src/lib.rs
+++ b/polkadot/xcm/xcm-builder/src/lib.rs
@@ -43,7 +43,7 @@ pub use barriers::{
 mod controller;
 pub use controller::{
-	Controller, ExecuteController, ExecuteControllerWeightInfo, QueryController,
+	Controller, ExecuteController, ExecuteControllerWeightInfo, MaxXcmEncodedSize, QueryController,
 	QueryControllerWeightInfo, QueryHandler, SendController, SendControllerWeightInfo,
 };
diff --git a/prdoc/pr_3749.prdoc b/prdoc/pr_3749.prdoc
new file mode 100644
index 00000000000..1ebde9670e0
--- /dev/null
+++ b/prdoc/pr_3749.prdoc
@@ -0,0 +1,47 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: "pallet-xcm: deprecate execute and send in favor of execute_blob and send_blob"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      pallet-xcm's extrinsics `execute` and `send` have been marked as deprecated.
+      Please change their usage to the new `execute_blob` and `send_blob`.
+      The migration from the old extrinsic to the new is very simple.
+      If you have your message `xcm: VersionedXcm`, then instead of passing in
+      `Box::new(xcm)` to both `execute` and `send`, you would pass in
+      `xcm.encode().try_into()` and handle the potential error of its encoded length
+      being bigger than `MAX_XCM_ENCODED_SIZE`.
+
+      pallet-contracts now takes the encoded XCM as well. It follows the same API as
+      `execute_blob` and `send_blob`.
+  - audience: Runtime User
+    description: |
+      pallet-xcm has a new pair of extrinsics, `execute_blob` and `send_blob`.
+      These are meant to be used instead of `execute` and `send`, which are now deprecated
+      and will be removed eventually.
+      These new extrinsics just require you to input the encoded XCM.
+      There's a new utility in PolkadotJS Apps for encoding XCMs you can use:
+      https://polkadot.js.org/apps/#/utilities/xcm
+      Just pass in the encoded XCM to the new extrinsics and you're done.
+
+      pallet-contracts now takes the encoded XCM as well. It follows the same API as
+      `execute_blob` and `send_blob`.
+ +crates: +- name: pallet-xcm +- name: staging-xcm +- name: staging-xcm-builder +- name: pallet-contracts +- name: asset-hub-rococo-runtime +- name: asset-hub-westend-runtime +- name: bridge-hub-rococo-runtime +- name: bridge-hub-westend-runtime +- name: collectives-westend-runtime +- name: coretime-rococo-runtime +- name: coretime-westend-runtime +- name: people-rococo-runtime +- name: people-westend-runtime +- name: rococo-runtime +- name: westend-runtime diff --git a/substrate/frame/contracts/mock-network/src/tests.rs b/substrate/frame/contracts/mock-network/src/tests.rs index d22221fe8ee..39aa9bebc0f 100644 --- a/substrate/frame/contracts/mock-network/src/tests.rs +++ b/substrate/frame/contracts/mock-network/src/tests.rs @@ -23,7 +23,6 @@ use crate::{ }; use codec::{Decode, Encode}; use frame_support::{ - assert_err, pallet_prelude::Weight, traits::{fungibles::Mutate, Currency}, }; @@ -102,7 +101,7 @@ fn test_xcm_execute() { 0, Weight::MAX, None, - VersionedXcm::V4(message).encode(), + VersionedXcm::V4(message).encode().encode(), DebugInfo::UnsafeDebug, CollectEvents::UnsafeCollect, Determinism::Enforced, @@ -146,7 +145,7 @@ fn test_xcm_execute_incomplete() { 0, Weight::MAX, None, - VersionedXcm::V4(message).encode(), + VersionedXcm::V4(message).encode().encode(), DebugInfo::UnsafeDebug, CollectEvents::UnsafeCollect, Determinism::Enforced, @@ -160,37 +159,6 @@ fn test_xcm_execute_incomplete() { }); } -#[test] -fn test_xcm_execute_filtered_call() { - MockNet::reset(); - - let contract_addr = instantiate_test_contract("xcm_execute"); - - ParaA::execute_with(|| { - // `remark` should be rejected, as it is not allowed by our CallFilter. - let call = parachain::RuntimeCall::System(frame_system::Call::remark { remark: vec![] }); - let message: Xcm = Xcm(vec![Transact { - origin_kind: OriginKind::Native, - require_weight_at_most: Weight::MAX, - call: call.encode().into(), - }]); - - let result = ParachainContracts::bare_call( - ALICE, - contract_addr.clone(), - 0, - Weight::MAX, - None, - VersionedXcm::V4(message).encode(), - DebugInfo::UnsafeDebug, - CollectEvents::UnsafeCollect, - Determinism::Enforced, - ); - - assert_err!(result.result, frame_system::Error::::CallFiltered); - }); -} - #[test] fn test_xcm_execute_reentrant_call() { MockNet::reset(); @@ -222,7 +190,7 @@ fn test_xcm_execute_reentrant_call() { 0, Weight::MAX, None, - VersionedXcm::V4(message).encode(), + VersionedXcm::V4(message).encode().encode(), DebugInfo::UnsafeDebug, CollectEvents::UnsafeCollect, Determinism::Enforced, @@ -258,7 +226,7 @@ fn test_xcm_send() { 0, Weight::MAX, None, - (dest, message).encode(), + (dest, message.encode()).encode(), DebugInfo::UnsafeDebug, CollectEvents::UnsafeCollect, Determinism::Enforced, diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs index 6433d4eecdc..e14a4b8bcb8 100644 --- a/substrate/frame/contracts/src/lib.rs +++ b/substrate/frame/contracts/src/lib.rs @@ -298,6 +298,9 @@ pub mod pallet { /// Therefore please make sure to be restrictive about which dispatchables are allowed /// in order to not introduce a new DoS vector like memory allocation patterns that can /// be exploited to drive the runtime into a panic. + /// + /// This filter does not apply to XCM transact calls. To impose restrictions on XCM transact + /// calls, you must configure them separately within the XCM pallet itself. type CallFilter: Contains<::RuntimeCall>; /// Used to answer contracts' queries regarding the current weight price. 
This is **not** diff --git a/substrate/frame/contracts/src/wasm/runtime.rs b/substrate/frame/contracts/src/wasm/runtime.rs index 160dfa0d2f3..28a08ab0224 100644 --- a/substrate/frame/contracts/src/wasm/runtime.rs +++ b/substrate/frame/contracts/src/wasm/runtime.rs @@ -25,12 +25,8 @@ use crate::{ }; use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen}; use frame_support::{ - dispatch::DispatchInfo, - ensure, - pallet_prelude::{DispatchResult, DispatchResultWithPostInfo}, - parameter_types, - traits::Get, - weights::Weight, + dispatch::DispatchInfo, ensure, pallet_prelude::DispatchResultWithPostInfo, parameter_types, + traits::Get, weights::Weight, }; use pallet_contracts_proc_macro::define_env; use pallet_contracts_uapi::{CallFlags, ReturnFlags}; @@ -41,9 +37,6 @@ use sp_runtime::{ }; use sp_std::{fmt, prelude::*}; use wasmi::{core::HostError, errors::LinkerError, Linker, Memory, Store}; -use xcm::VersionedXcm; - -type CallOf = ::RuntimeCall; /// The maximum nesting depth a contract can use when encoding types. const MAX_DECODE_NESTING: u32 = 256; @@ -378,29 +371,6 @@ fn already_charged(_: u32) -> Option { None } -/// Ensure that the XCM program is executable, by checking that it does not contain any [`Transact`] -/// instruction with a call that is not allowed by the CallFilter. -fn ensure_executable(message: &VersionedXcm>) -> DispatchResult { - use frame_support::traits::Contains; - use xcm::prelude::{Transact, Xcm}; - - let mut message: Xcm> = - message.clone().try_into().map_err(|_| Error::::XCMDecodeFailed)?; - - message.iter_mut().try_for_each(|inst| -> DispatchResult { - let Transact { ref mut call, .. } = inst else { return Ok(()) }; - let call = call.ensure_decoded().map_err(|_| Error::::XCMDecodeFailed)?; - - if !::CallFilter::contains(call) { - return Err(frame_system::Error::::CallFiltered.into()) - } - - Ok(()) - })?; - - Ok(()) -} - /// Can only be used for one call. 
pub struct Runtime<'a, E: Ext + 'a> { ext: &'a mut E, @@ -2112,16 +2082,13 @@ pub mod env { msg_len: u32, ) -> Result { use frame_support::dispatch::DispatchInfo; - use xcm::VersionedXcm; use xcm_builder::{ExecuteController, ExecuteControllerWeightInfo}; ctx.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?; - let message: VersionedXcm> = - ctx.read_sandbox_memory_as_unbounded(memory, msg_ptr, msg_len)?; - ensure_executable::(&message)?; + let message = ctx.read_sandbox_memory_as_unbounded(memory, msg_ptr, msg_len)?; let execute_weight = - <::Xcm as ExecuteController<_, _>>::WeightInfo::execute(); + <::Xcm as ExecuteController<_, _>>::WeightInfo::execute_blob(); let weight = ctx.ext.gas_meter().gas_left().max(execute_weight); let dispatch_info = DispatchInfo { weight, ..Default::default() }; @@ -2130,9 +2097,9 @@ pub mod env { RuntimeCosts::CallXcmExecute, |ctx| { let origin = crate::RawOrigin::Signed(ctx.ext.address().clone()).into(); - let weight_used = <::Xcm>::execute( + let weight_used = <::Xcm>::execute_blob( origin, - Box::new(message), + message, weight.saturating_sub(execute_weight), )?; @@ -2152,19 +2119,18 @@ pub mod env { msg_len: u32, output_ptr: u32, ) -> Result { - use xcm::{VersionedLocation, VersionedXcm}; + use xcm::VersionedLocation; use xcm_builder::{SendController, SendControllerWeightInfo}; ctx.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?; let dest: VersionedLocation = ctx.read_sandbox_memory_as(memory, dest_ptr)?; - let message: VersionedXcm<()> = - ctx.read_sandbox_memory_as_unbounded(memory, msg_ptr, msg_len)?; - let weight = <::Xcm as SendController<_>>::WeightInfo::send(); + let message = ctx.read_sandbox_memory_as_unbounded(memory, msg_ptr, msg_len)?; + let weight = <::Xcm as SendController<_>>::WeightInfo::send_blob(); ctx.charge_gas(RuntimeCosts::CallRuntime(weight))?; let origin = crate::RawOrigin::Signed(ctx.ext.address().clone()).into(); - match <::Xcm>::send(origin, dest.into(), message.into()) { + match <::Xcm>::send_blob(origin, dest.into(), message) { Ok(message_id) => { ctx.write_sandbox_memory(memory, output_ptr, &message_id.encode())?; Ok(ReturnErrorCode::Success) diff --git a/substrate/frame/contracts/uapi/src/host.rs b/substrate/frame/contracts/uapi/src/host.rs index 04f58895ab4..459cb59bead 100644 --- a/substrate/frame/contracts/uapi/src/host.rs +++ b/substrate/frame/contracts/uapi/src/host.rs @@ -790,7 +790,7 @@ pub trait HostFn { /// /// # Parameters /// - /// - `dest`: The XCM destination, should be decodable as [VersionedMultiLocation](https://paritytech.github.io/polkadot-sdk/master/staging_xcm/enum.VersionedMultiLocation.html), + /// - `dest`: The XCM destination, should be decodable as [MultiLocation](https://paritytech.github.io/polkadot-sdk/master/staging_xcm/enum.VersionedLocation.html), /// traps otherwise. /// - `msg`: The message, should be decodable as a [VersionedXcm](https://paritytech.github.io/polkadot-sdk/master/staging_xcm/enum.VersionedXcm.html), /// traps otherwise. 
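A minimal caller-side sketch of the migration this patch describes (see prdoc/pr_3749.prdoc above). The helper name `encode_for_blob_extrinsic` is hypothetical and purely illustrative: instead of passing `Box::new(message)` to `send`/`execute`, a caller SCALE-encodes the message and bound-checks its length against `MaxXcmEncodedSize` (re-exported from `xcm_builder` in this patch); an over-long encoding corresponds to the new `XcmTooLarge` error.

use codec::Encode;
use frame_support::BoundedVec;
use xcm::VersionedXcm;
use xcm_builder::MaxXcmEncodedSize;

// Hypothetical helper: SCALE-encode a versioned XCM for `execute_blob`/`send_blob`.
// The `try_into` fails if the encoding exceeds `MaxXcmEncodedSize`.
fn encode_for_blob_extrinsic(
    message: &VersionedXcm<()>,
) -> Result<BoundedVec<u8, MaxXcmEncodedSize>, ()> {
    message.encode().try_into().map_err(|_| ())
}

This mirrors what the updated tests do inline, e.g. `VersionedXcm::from(message).encode().try_into().unwrap()` passed to `XcmPallet::send_blob`.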
-- GitLab From 5ac32ee2bdd9df47e4578c51810db4d2139757eb Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Wed, 27 Mar 2024 11:49:10 +0200 Subject: [PATCH 041/128] authority-discovery: Set intervals to start when authority keys changes (#3764) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The authority-discovery mechanism has implemented a few exponential timers for: - publishing the authority records - goes from 2 seconds (when freshly booted) to 1 hour if the node is long-running - set to 1 hour after successfully publishing the authority record - discovering other authority records - goes from 2 seconds (when freshly booted) to 10 minutes if the node is long-running This PR resets the exponential publishing and discovery interval to defaults ensuring that long-running nodes: - will retry publishing the authority records as aggressively as freshly booted nodes - Currently, if a long-running node fails to publish the DHT record when the keys change (ie DhtEvent::ValuePutFailed), it will only retry after 1 hour - will rediscover other authorities faster (since there is a chance that other authority keys changed) The subp2p-explorer has difficulties discovering the authorities when the authority set changes in the first few hours. This might be entirely due to the recursive nature of the DHT and the needed time to propagate the records. However, there is a small chance that the authority publishing failed and is only retried in 1h. Let me know if this makes sense 🙏 cc @paritytech/networking --------- Signed-off-by: Alexandru Vasile Co-authored-by: Dmitry Markin --- .../authority-discovery/src/interval.rs | 20 +++++++++++-- .../client/authority-discovery/src/worker.rs | 28 +++++++++++++++++-- 2 files changed, 44 insertions(+), 4 deletions(-) diff --git a/substrate/client/authority-discovery/src/interval.rs b/substrate/client/authority-discovery/src/interval.rs index 23c7ce266e3..0eee0d159cd 100644 --- a/substrate/client/authority-discovery/src/interval.rs +++ b/substrate/client/authority-discovery/src/interval.rs @@ -28,6 +28,7 @@ use std::{ /// /// Doubles interval duration on each tick until the configured maximum is reached. pub struct ExpIncInterval { + start: Duration, max: Duration, next: Duration, delay: Delay, @@ -37,14 +38,29 @@ impl ExpIncInterval { /// Create a new [`ExpIncInterval`]. pub fn new(start: Duration, max: Duration) -> Self { let delay = Delay::new(start); - Self { max, next: start * 2, delay } + Self { start, max, next: start * 2, delay } } - /// Fast forward the exponentially increasing interval to the configured maximum. + /// Fast forward the exponentially increasing interval to the configured maximum, if not already + /// set. pub fn set_to_max(&mut self) { + if self.next == self.max { + return; + } + self.next = self.max; self.delay = Delay::new(self.next); } + + /// Rewind the exponentially increasing interval to the configured start, if not already set. 
+ pub fn set_to_start(&mut self) { + if self.next == self.start * 2 { + return; + } + + self.next = self.start * 2; + self.delay = Delay::new(self.start); + } } impl Stream for ExpIncInterval { diff --git a/substrate/client/authority-discovery/src/worker.rs b/substrate/client/authority-discovery/src/worker.rs index b77f0241ec2..4ad7db5f7da 100644 --- a/substrate/client/authority-discovery/src/worker.rs +++ b/substrate/client/authority-discovery/src/worker.rs @@ -129,6 +129,9 @@ pub struct Worker { /// List of keys onto which addresses have been published at the latest publication. /// Used to check whether they have changed. latest_published_keys: HashSet, + /// List of the kademlia keys that have been published at the latest publication. + /// Used to associate DHT events with our published records. + latest_published_kad_keys: HashSet, /// Same value as in the configuration. publish_non_global_ips: bool, @@ -265,6 +268,7 @@ where publish_interval, publish_if_changed_interval, latest_published_keys: HashSet::new(), + latest_published_kad_keys: HashSet::new(), publish_non_global_ips: config.publish_non_global_ips, public_addresses, strict_record_validation: config.strict_record_validation, @@ -397,8 +401,17 @@ where self.client.as_ref(), ).await?.into_iter().collect::>(); - if only_if_changed && keys == self.latest_published_keys { - return Ok(()) + if only_if_changed { + // If the authority keys did not change and the `publish_if_changed_interval` was + // triggered then do nothing. + if keys == self.latest_published_keys { + return Ok(()) + } + + // We have detected a change in the authority keys, reset the timers to + // publish and gather data faster. + self.publish_interval.set_to_start(); + self.query_interval.set_to_start(); } let addresses = serialize_addresses(self.addresses_to_publish()); @@ -422,6 +435,8 @@ where keys_vec, )?; + self.latest_published_kad_keys = kv_pairs.iter().map(|(k, _)| k.clone()).collect(); + for (key, value) in kv_pairs.into_iter() { self.network.put_value(key, value); } @@ -523,6 +538,10 @@ where } }, DhtEvent::ValuePut(hash) => { + if !self.latest_published_kad_keys.contains(&hash) { + return; + } + // Fast forward the exponentially increasing interval to the configured maximum. In // case this was the first successful address publishing there is no need for a // timely retry. @@ -535,6 +554,11 @@ where debug!(target: LOG_TARGET, "Successfully put hash '{:?}' on Dht.", hash) }, DhtEvent::ValuePutFailed(hash) => { + if !self.latest_published_kad_keys.contains(&hash) { + // Not a value we have published or received multiple times. + return; + } + if let Some(metrics) = &self.metrics { metrics.dht_event_received.with_label_values(&["value_put_failed"]).inc(); } -- GitLab From 25af0adf7836c67e28083276ec6f06d974e4f685 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Wed, 27 Mar 2024 12:50:03 +0100 Subject: [PATCH 042/128] [ci] Collect subsystem-benchmarks results and add graphs for them (#3853) PR adds CI jobs that collect subsystem-benchmarks results and publishes them to gh-pages. 
cc https://github.com/paritytech/ci_cd/issues/934 cc @AndreiEres --- .github/workflows/subsystem-benchmarks.yml | 42 ++++++++++++ .gitlab-ci.yml | 7 ++ .gitlab/pipeline/publish.yml | 76 ++++++++++++++++++++-- .gitlab/pipeline/test.yml | 6 ++ 4 files changed, 125 insertions(+), 6 deletions(-) create mode 100644 .github/workflows/subsystem-benchmarks.yml diff --git a/.github/workflows/subsystem-benchmarks.yml b/.github/workflows/subsystem-benchmarks.yml new file mode 100644 index 00000000000..37a9e0f4680 --- /dev/null +++ b/.github/workflows/subsystem-benchmarks.yml @@ -0,0 +1,42 @@ +# The actions takes json file as input and runs github-action-benchmark for it. + +on: + workflow_dispatch: + inputs: + benchmark-data-dir-path: + description: "Path to the benchmark data directory" + required: true + type: string + output-file-path: + description: "Path to the benchmark data file" + required: true + type: string + +jobs: + subsystem-benchmarks: + runs-on: ubuntu-latest + steps: + - name: Checkout Sources + uses: actions/checkout@v4.1.2 + with: + fetch-depth: 0 + ref: "gh-pages" + + - name: Copy bench results + id: step_one + run: | + cp bench/gitlab/${{ github.event.inputs.output-file-path }} ${{ github.event.inputs.output-file-path }} + + - name: Switch branch + id: step_two + run: | + git checkout master + + - name: Store benchmark result + uses: benchmark-action/github-action-benchmark@v1 + with: + tool: "customSmallerIsBetter" + output-file-path: ${{ github.event.inputs.output-file-path }} + benchmark-data-dir-path: "bench/${{ github.event.inputs.benchmark-data-dir-path }}" + github-token: ${{ secrets.GITHUB_TOKEN }} + auto-push: true diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 7f8796ca512..93a6ccb9f8f 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -147,6 +147,13 @@ default: - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues +.publish-gh-pages-refs: + rules: + - if: $CI_PIPELINE_SOURCE == "pipeline" + when: never + - if: $CI_PIPELINE_SOURCE == "web" && $CI_COMMIT_REF_NAME == "master" + - if: $CI_COMMIT_REF_NAME == "master" + # handle the specific case where benches could store incorrect bench data because of the downstream staging runs # exclude cargo-check-benches from such runs .test-refs-check-benches: diff --git a/.gitlab/pipeline/publish.yml b/.gitlab/pipeline/publish.yml index b73acb560f6..bd9387f3c07 100644 --- a/.gitlab/pipeline/publish.yml +++ b/.gitlab/pipeline/publish.yml @@ -3,16 +3,13 @@ publish-rustdoc: stage: publish - extends: .kubernetes-env + extends: + - .kubernetes-env + - .publish-gh-pages-refs variables: CI_IMAGE: node:18 GIT_DEPTH: 100 RUSTDOCS_DEPLOY_REFS: "master" - rules: - - if: $CI_PIPELINE_SOURCE == "pipeline" - when: never - - if: $CI_PIPELINE_SOURCE == "web" && $CI_COMMIT_REF_NAME == "master" - - if: $CI_COMMIT_REF_NAME == "master" needs: - job: build-rustdoc artifacts: true @@ -60,9 +57,76 @@ publish-rustdoc: - git commit -m "___Updated docs for ${CI_COMMIT_REF_NAME}___" || echo "___Nothing to commit___" - git push origin gh-pages --force + # artificial sleep to publish gh-pages + - sleep 300 after_script: - rm -rf .git/ ./* +publish-subsystem-benchmarks: + stage: publish + variables: + CI_IMAGE: "paritytech/tools:latest" + extends: + - .kubernetes-env + - .publish-gh-pages-refs + needs: + - job: subsystem-regression-tests + artifacts: true + - job: publish-rustdoc + artifacts: false + script: + # setup ssh + - eval $(ssh-agent) + - ssh-add - <<< ${GITHUB_SSH_PRIV_KEY} + - mkdir ~/.ssh 
&& touch ~/.ssh/known_hosts + - ssh-keyscan -t rsa github.com >> ~/.ssh/known_hosts + # Set git config + - rm -rf .git/config + - git config user.email "devops-team@parity.io" + - git config user.name "${GITHUB_USER}" + - git config remote.origin.url "git@github.com:/paritytech/${CI_PROJECT_NAME}.git" + - git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*" + - git fetch origin gh-pages + # Push result to github + - git checkout gh-pages + - mkdir -p bench/gitlab/ || echo "Directory exists" + - rm -rf bench/gitlab/*.json || echo "No json files" + - cp -r charts/*.json bench/gitlab/ + - git add bench/gitlab/ + - git commit -m "Add json files with benchmark results for ${CI_COMMIT_REF_NAME}" + - git push origin gh-pages + # artificial sleep to publish gh-pages + - sleep 300 + allow_failure: true + after_script: + - rm -rf .git/ ./* + +trigger_workflow: + stage: deploy + extends: + - .kubernetes-env + - .publish-gh-pages-refs + needs: + - job: publish-subsystem-benchmarks + artifacts: false + - job: subsystem-regression-tests + artifacts: true + script: + - echo "Triggering workflow" + - | + for benchmark in $(ls charts/*.json); do + export bencmark_name=$(basename $benchmark) + echo "Benchmark: $bencmark_name" + export benchmark_dir=$(echo $bencmark_name | sed 's/\.json//') + curl -q -X POST \ + -H "Accept: application/vnd.github.v3+json" \ + -H "Authorization: token $GITHUB_TOKEN" \ + https://api.github.com/repos/paritytech-stg/${CI_PROJECT_NAME}/actions/workflows/subsystem-benchmarks.yml/dispatches \ + -d '{"ref":"refs/heads/master","inputs":{"benchmark-data-dir-path":"'$benchmark_dir'","output-file-path":"'$bencmark_name'"}}' + sleep 300 + done + allow_failure: true + # note: images are used not only in zombienet but also in rococo, wococo and versi .build-push-image: image: $BUILDAH_IMAGE diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index 476ac6333f5..d97f9da986c 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -497,6 +497,12 @@ test-syscalls: subsystem-regression-tests: stage: test + artifacts: + name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}" + when: on_success + expire_in: 1 days + paths: + - charts/ extends: - .docker-env - .common-refs -- GitLab From 417c54c61c3fc77bd0402eec2c1ccfdb4ad38f8e Mon Sep 17 00:00:00 2001 From: Andrei Sandu <54316454+sandreim@users.noreply.github.com> Date: Wed, 27 Mar 2024 16:44:10 +0200 Subject: [PATCH 043/128] collation-generation + collator-protocol: collate on multiple assigned cores (#3795) This works only for collators that implement the `collator_fn` allowing `collation-generation` subsystem to pull collations triggered on new heads. Also enables `request_v2::CollationFetchingResponse::CollationWithParentHeadData` for test adder/undying collators. 
TODO: - [x] fix tests - [x] new tests - [x] PR doc --------- Signed-off-by: Andrei Sandu --- .../consensus/aura/src/collators/lookahead.rs | 39 ++- .../node/collation-generation/src/error.rs | 2 + polkadot/node/collation-generation/src/lib.rs | 183 +++++++----- .../node/collation-generation/src/tests.rs | 261 +++++++++++++++++- .../src/collator_side/mod.rs | 129 ++++++--- .../src/collator_side/tests/mod.rs | 1 + .../tests/prospective_parachains.rs | 2 + .../src/collator_side/validators_buffer.rs | 18 +- polkadot/node/primitives/src/lib.rs | 6 +- polkadot/node/subsystem-types/src/messages.rs | 2 + polkadot/node/subsystem-util/src/lib.rs | 3 +- .../test-parachains/adder/collator/Cargo.toml | 2 +- .../undying/collator/Cargo.toml | 2 +- polkadot/runtime/test-runtime/src/lib.rs | 34 ++- prdoc/pr_3795.prdoc | 14 + 15 files changed, 557 insertions(+), 141 deletions(-) create mode 100644 prdoc/pr_3795.prdoc diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index 161f10d55a1..58005833617 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -49,7 +49,7 @@ use polkadot_node_subsystem::messages::{ CollationGenerationMessage, RuntimeApiMessage, RuntimeApiRequest, }; use polkadot_overseer::Handle as OverseerHandle; -use polkadot_primitives::{CollatorPair, Id as ParaId, OccupiedCoreAssumption}; +use polkadot_primitives::{CollatorPair, CoreIndex, Id as ParaId, OccupiedCoreAssumption}; use futures::{channel::oneshot, prelude::*}; use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf}; @@ -184,7 +184,15 @@ where while let Some(relay_parent_header) = import_notifications.next().await { let relay_parent = relay_parent_header.hash(); - if !is_para_scheduled(relay_parent, params.para_id, &mut params.overseer_handle).await { + // TODO: Currently we use just the first core here, but for elastic scaling + // we iterate and build on all of the cores returned. + let core_index = if let Some(core_index) = + cores_scheduled_for_para(relay_parent, params.para_id, &mut params.overseer_handle) + .await + .get(0) + { + *core_index + } else { tracing::trace!( target: crate::LOG_TARGET, ?relay_parent, @@ -193,7 +201,7 @@ where ); continue - } + }; let max_pov_size = match params .relay_client @@ -396,6 +404,7 @@ where parent_head: parent_header.encode().into(), validation_code_hash, result_sender: None, + core_index, }, ), "SubmitCollation", @@ -480,14 +489,12 @@ async fn max_ancestry_lookback( } } -// Checks if there exists a scheduled core for the para at the provided relay parent. -// -// Falls back to `false` in case of an error. -async fn is_para_scheduled( +// Return all the cores assigned to the para at the provided relay parent. 
+async fn cores_scheduled_for_para(
 	relay_parent: PHash,
 	para_id: ParaId,
 	overseer_handle: &mut OverseerHandle,
-) -> bool {
+) -> Vec {
 	let (tx, rx) = oneshot::channel();
 	let request = RuntimeApiRequest::AvailabilityCores(tx);
 	overseer_handle
@@ -503,7 +510,7 @@ async fn is_para_scheduled(
 			?relay_parent,
 			"Failed to query availability cores runtime API",
 			);
-			return false
+			return Vec::new()
 		},
 		Err(oneshot::Canceled) => {
 			tracing::error!(
@@ -511,9 +518,19 @@ async fn is_para_scheduled(
 			?relay_parent,
 			"Sender for availability cores runtime request dropped",
 			);
-			return false
+			return Vec::new()
 		},
 	};
-	cores.iter().any(|core| core.para_id() == Some(para_id))
+	cores
+		.iter()
+		.enumerate()
+		.filter_map(|(index, core)| {
+			if core.para_id() == Some(para_id) {
+				Some(CoreIndex(index as u32))
+			} else {
+				None
+			}
+		})
+		.collect()
 }
diff --git a/polkadot/node/collation-generation/src/error.rs b/polkadot/node/collation-generation/src/error.rs
index ac5db6cd7f2..852c50f3068 100644
--- a/polkadot/node/collation-generation/src/error.rs
+++ b/polkadot/node/collation-generation/src/error.rs
@@ -28,6 +28,8 @@ pub enum Error {
 	Util(#[from] polkadot_node_subsystem_util::Error),
 	#[error(transparent)]
 	Erasure(#[from] polkadot_erasure_coding::Error),
+	#[error("Parachain backing state not available in runtime.")]
+	MissingParaBackingState,
 }
 pub type Result = std::result::Result;
diff --git a/polkadot/node/collation-generation/src/lib.rs b/polkadot/node/collation-generation/src/lib.rs
index 3b1a8f5ff23..3164f6078bc 100644
--- a/polkadot/node/collation-generation/src/lib.rs
+++ b/polkadot/node/collation-generation/src/lib.rs
@@ -44,8 +44,8 @@ use polkadot_node_subsystem::{
 };
 use polkadot_node_subsystem_util::{
 	has_required_runtime, request_async_backing_params, request_availability_cores,
-	request_claim_queue, request_persisted_validation_data, request_validation_code,
-	request_validation_code_hash, request_validators,
+	request_claim_queue, request_para_backing_state, request_persisted_validation_data,
+	request_validation_code, request_validation_code_hash, request_validators,
 };
 use polkadot_primitives::{
 	collator_signature_payload, CandidateCommitments, CandidateDescriptor, CandidateReceipt,
@@ -212,6 +212,7 @@ async fn handle_new_activations(
 	if config.collator.is_none() {
 		return Ok(())
 	}
+	let para_id = config.para_id;
 	let _overall_timer = metrics.time_new_activations();
@@ -225,25 +226,23 @@ async fn handle_new_activations(
 	);
 	let availability_cores = availability_cores??;
-	let n_validators = validators??.len();
 	let async_backing_params = async_backing_params?.ok();
+	let n_validators = validators??.len();
 	let maybe_claim_queue = fetch_claim_queue(ctx.sender(), relay_parent).await?;
-	for (core_idx, core) in availability_cores.into_iter().enumerate() {
-		let _availability_core_timer = metrics.time_new_activations_availability_core();
+	// The loop below will fill in cores that the para is allowed to build on.
+	let mut cores_to_build_on = Vec::new();
-		let (scheduled_core, assumption) = match core {
-			CoreState::Scheduled(scheduled_core) =>
-				(scheduled_core, OccupiedCoreAssumption::Free),
+	for (core_idx, core) in availability_cores.into_iter().enumerate() {
+		let scheduled_core = match core {
+			CoreState::Scheduled(scheduled_core) => scheduled_core,
 			CoreState::Occupied(occupied_core) => match async_backing_params {
 				Some(params) if params.max_candidate_depth >= 1 => {
 					// maximum candidate depth when building on top of a block
 					// pending availability is necessarily 1 - the depth of the
 					// pending block is 0 so the child has depth 1.
-					// TODO [now]: this assumes that next up == current.
-					// in practice we should only set `OccupiedCoreAssumption::Included`
-					// when the candidate occupying the core is also of the same para.
+					// Use the claim queue if available, or fall back to `next_up_on_available`
 					let res = match maybe_claim_queue {
 						Some(ref claim_queue) => {
 							// read what's in the claim queue for this core
 							claim_queue
@@ -257,8 +256,7 @@ async fn handle_new_activations(
 							// `next_up_on_available`
 							occupied_core.next_up_on_available
 						},
-					}
-					.map(|scheduled| (scheduled, OccupiedCoreAssumption::Included));
+					};
 					match res {
 						Some(res) => res,
@@ -279,7 +277,7 @@ async fn handle_new_activations(
 					gum::trace!(
 						target: LOG_TARGET,
 						core_idx = %core_idx,
-						"core is free. Keep going.",
+						"core is not assigned to any para. Keep going.",
 					);
 					continue
 				},
@@ -294,64 +292,90 @@ async fn handle_new_activations(
 				their_para = %scheduled_core.para_id,
 				"core is not assigned to our para. Keep going.",
 			);
-			continue
+		} else {
+			// Accumulate cores for building collation(s) outside the loop.
+			cores_to_build_on.push(CoreIndex(core_idx as u32));
 		}
+	}
-		// we get validation data and validation code synchronously for each core instead of
-		// within the subtask loop, because we have only a single mutable handle to the
-		// context, so the work can't really be distributed
-
-		let validation_data = match request_persisted_validation_data(
-			relay_parent,
-			scheduled_core.para_id,
-			assumption,
-			ctx.sender(),
-		)
-		.await
-		.await??
-		{
-			Some(v) => v,
-			None => {
-				gum::trace!(
-					target: LOG_TARGET,
-					core_idx = %core_idx,
-					relay_parent = ?relay_parent,
-					our_para = %config.para_id,
-					their_para = %scheduled_core.para_id,
-					"validation data is not available",
-				);
-				continue
-			},
-		};
+	// Skip to next relay parent if there is no core assigned to us.
+	if cores_to_build_on.is_empty() {
+		continue
+	}
-		let validation_code_hash = match obtain_validation_code_hash_with_assumption(
-			relay_parent,
-			scheduled_core.para_id,
-			assumption,
-			ctx.sender(),
-		)
-		.await?
-		{
-			Some(v) => v,
-			None => {
-				gum::trace!(
-					target: LOG_TARGET,
-					core_idx = %core_idx,
-					relay_parent = ?relay_parent,
-					our_para = %config.para_id,
-					their_para = %scheduled_core.para_id,
-					"validation code hash is not found.",
-				);
-				continue
-			},
-		};
+	let para_backing_state =
+		request_para_backing_state(relay_parent, config.para_id, ctx.sender())
+			.await
+			.await??
+			.ok_or(crate::error::Error::MissingParaBackingState)?;
+
+	// We are being very optimistic here, but one of the cores could still be pending
+	// availability for some more blocks, or even time out.
+	// As for the timeout assumption, the collator can't really know, because it doesn't
+	// receive bitfield gossip.
+ let assumption = if para_backing_state.pending_availability.is_empty() { + OccupiedCoreAssumption::Free + } else { + OccupiedCoreAssumption::Included + }; + + gum::debug!( + target: LOG_TARGET, + relay_parent = ?relay_parent, + our_para = %config.para_id, + ?assumption, + "Occupied core(s) assumption", + ); + + let mut validation_data = match request_persisted_validation_data( + relay_parent, + config.para_id, + assumption, + ctx.sender(), + ) + .await + .await?? + { + Some(v) => v, + None => { + gum::debug!( + target: LOG_TARGET, + relay_parent = ?relay_parent, + our_para = %config.para_id, + "validation data is not available", + ); + continue + }, + }; - let task_config = config.clone(); - let metrics = metrics.clone(); - let mut task_sender = ctx.sender().clone(); - ctx.spawn( - "collation-builder", - Box::pin(async move { + let validation_code_hash = match obtain_validation_code_hash_with_assumption( + relay_parent, + config.para_id, + assumption, + ctx.sender(), + ) + .await? + { + Some(v) => v, + None => { + gum::debug!( + target: LOG_TARGET, + relay_parent = ?relay_parent, + our_para = %config.para_id, + "validation code hash is not found.", + ); + continue + }, + }; + + let task_config = config.clone(); + let metrics = metrics.clone(); + let mut task_sender = ctx.sender().clone(); + + ctx.spawn( + "chained-collation-builder", + Box::pin(async move { + for core_index in cores_to_build_on { let collator_fn = match task_config.collator.as_ref() { Some(x) => x, None => return, @@ -363,21 +387,23 @@ async fn handle_new_activations( None => { gum::debug!( target: LOG_TARGET, - para_id = %scheduled_core.para_id, + ?para_id, "collator returned no collation on collate", ); return }, }; + let parent_head = collation.head_data.clone(); construct_and_distribute_receipt( PreparedCollation { collation, - para_id: scheduled_core.para_id, + para_id, relay_parent, - validation_data, + validation_data: validation_data.clone(), validation_code_hash, n_validators, + core_index, }, task_config.key.clone(), &mut task_sender, @@ -385,9 +411,13 @@ async fn handle_new_activations( &metrics, ) .await; - }), - )?; - } + + // Chain the collations. All else stays the same as we build the chained + // collation on same relay parent. 
+ validation_data.parent_head = parent_head; + } + }), + )?; } Ok(()) @@ -408,6 +438,7 @@ async fn handle_submit_collation( parent_head, validation_code_hash, result_sender, + core_index, } = params; let validators = request_validators(relay_parent, ctx.sender()).await.await??; @@ -444,6 +475,7 @@ async fn handle_submit_collation( validation_data, validation_code_hash, n_validators, + core_index, }; construct_and_distribute_receipt( @@ -465,6 +497,7 @@ struct PreparedCollation { validation_data: PersistedValidationData, validation_code_hash: ValidationCodeHash, n_validators: usize, + core_index: CoreIndex, } /// Takes a prepared collation, along with its context, and produces a candidate receipt @@ -483,6 +516,7 @@ async fn construct_and_distribute_receipt( validation_data, validation_code_hash, n_validators, + core_index, } = collation; let persisted_validation_data_hash = validation_data.hash(); @@ -578,6 +612,7 @@ async fn construct_and_distribute_receipt( pov, parent_head_data, result_sender, + core_index, }) .await; } diff --git a/polkadot/node/collation-generation/src/tests.rs b/polkadot/node/collation-generation/src/tests.rs index 9b16980e6af..3cb3e61a35a 100644 --- a/polkadot/node/collation-generation/src/tests.rs +++ b/polkadot/node/collation-generation/src/tests.rs @@ -30,13 +30,16 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_test_helpers::{subsystem_test_harness, TestSubsystemContextHandle}; use polkadot_node_subsystem_util::TimeoutExt; use polkadot_primitives::{ - AsyncBackingParams, CollatorPair, HeadData, Id as ParaId, Id, PersistedValidationData, + async_backing::{BackingState, CandidatePendingAvailability}, + AsyncBackingParams, BlockNumber, CollatorPair, HeadData, PersistedValidationData, ScheduledCore, ValidationCode, }; use rstest::rstest; use sp_keyring::sr25519::Keyring as Sr25519Keyring; use std::pin::Pin; -use test_helpers::{dummy_candidate_descriptor, dummy_hash, dummy_head_data, dummy_validator}; +use test_helpers::{ + dummy_candidate_descriptor, dummy_hash, dummy_head_data, dummy_validator, make_candidate, +}; type VirtualOverseer = TestSubsystemContextHandle; @@ -105,9 +108,9 @@ impl Future for TestCollator { impl Unpin for TestCollator {} -async fn overseer_recv(overseer: &mut VirtualOverseer) -> AllMessages { - const TIMEOUT: std::time::Duration = std::time::Duration::from_millis(2000); +const TIMEOUT: std::time::Duration = std::time::Duration::from_millis(2000); +async fn overseer_recv(overseer: &mut VirtualOverseer) -> AllMessages { overseer .recv() .timeout(TIMEOUT) @@ -135,6 +138,41 @@ fn scheduled_core_for>(para_id: Id) -> ScheduledCore { ScheduledCore { para_id: para_id.into(), collator: None } } +fn dummy_candidate_pending_availability( + para_id: ParaId, + candidate_relay_parent: Hash, + relay_parent_number: BlockNumber, +) -> CandidatePendingAvailability { + let (candidate, _pvd) = make_candidate( + candidate_relay_parent, + relay_parent_number, + para_id, + dummy_head_data(), + HeadData(vec![1]), + ValidationCode(vec![1, 2, 3]).hash(), + ); + let candidate_hash = candidate.hash(); + + CandidatePendingAvailability { + candidate_hash, + descriptor: candidate.descriptor, + commitments: candidate.commitments, + relay_parent_number, + max_pov_size: 5 * 1024 * 1024, + } +} + +fn dummy_backing_state(pending_availability: Vec) -> BackingState { + let constraints = helpers::dummy_constraints( + 0, + vec![0], + dummy_head_data(), + ValidationCodeHash::from(Hash::repeat_byte(42)), + ); + + BackingState { constraints, pending_availability } +} + 
#[rstest] #[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT - 1)] #[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)] @@ -176,6 +214,12 @@ fn requests_availability_per_relay_parent(#[case] runtime_version: u32) { ))) if runtime_version >= RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT => { tx.send(Ok(BTreeMap::new())).unwrap(); }, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _hash, + RuntimeApiRequest::ParaBackingState(_para_id, tx), + ))) => { + tx.send(Ok(Some(dummy_backing_state(vec![])))).unwrap(); + }, Some(msg) => panic!("didn't expect any other overseer requests given no availability cores; got {:?}", msg), } } @@ -273,6 +317,12 @@ fn requests_validation_data_for_scheduled_matches(#[case] runtime_version: u32) ))) if runtime_version >= RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT => { tx.send(Ok(BTreeMap::new())).unwrap(); }, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _hash, + RuntimeApiRequest::ParaBackingState(_para_id, tx), + ))) => { + tx.send(Ok(Some(dummy_backing_state(vec![])))).unwrap(); + }, Some(msg) => { panic!("didn't expect any other overseer requests; got {:?}", msg) }, @@ -384,6 +434,12 @@ fn sends_distribute_collation_message(#[case] runtime_version: u32) { ))) if runtime_version >= RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT => { tx.send(Ok(BTreeMap::new())).unwrap(); }, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _hash, + RuntimeApiRequest::ParaBackingState(_para_id, tx), + ))) => { + tx.send(Ok(Some(dummy_backing_state(vec![])))).unwrap(); + }, Some(msg @ AllMessages::CollatorProtocol(_)) => { inner_to_collator_protocol.lock().await.push(msg); }, @@ -564,6 +620,12 @@ fn fallback_when_no_validation_code_hash_api(#[case] runtime_version: u32) { let res = BTreeMap::>::new(); tx.send(Ok(res)).unwrap(); }, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _hash, + RuntimeApiRequest::ParaBackingState(_para_id, tx), + ))) => { + tx.send(Ok(Some(dummy_backing_state(vec![])))).unwrap(); + }, Some(msg) => { panic!("didn't expect any other overseer requests; got {:?}", msg) }, @@ -611,6 +673,7 @@ fn submit_collation_is_no_op_before_initialization() { parent_head: vec![1, 2, 3].into(), validation_code_hash: Hash::repeat_byte(1).into(), result_sender: None, + core_index: CoreIndex(0), }), }) .await; @@ -647,6 +710,7 @@ fn submit_collation_leads_to_distribution() { parent_head: vec![1, 2, 3].into(), validation_code_hash, result_sender: None, + core_index: CoreIndex(0), }), }) .await; @@ -721,6 +785,9 @@ fn distribute_collation_for_occupied_core_with_async_backing_enabled(#[case] run test_harness(|mut virtual_overseer| async move { helpers::initialize_collator(&mut virtual_overseer, para_id).await; helpers::activate_new_head(&mut virtual_overseer, activated_hash).await; + + let pending_availability = + vec![dummy_candidate_pending_availability(para_id, activated_hash, 1)]; helpers::handle_runtime_calls_on_new_head_activation( &mut virtual_overseer, activated_hash, @@ -728,14 +795,140 @@ fn distribute_collation_for_occupied_core_with_async_backing_enabled(#[case] run cores, runtime_version, claim_queue, + pending_availability, + ) + .await; + helpers::handle_core_processing_for_a_leaf( + &mut virtual_overseer, + activated_hash, + para_id, + // `CoreState` is `Occupied` => `OccupiedCoreAssumption` is `Included` + OccupiedCoreAssumption::Included, + 1, + ) + .await; + + virtual_overseer + }); +} + +// There are variable number of cores of cores in `Occupied` state and async backing is 
enabled. +// On new head activation `CollationGeneration` should produce and distribute a new collation +// with the proper assumption about the para candidate chain availability at the next block. +#[rstest] +#[case(0)] +#[case(1)] +#[case(2)] +fn distribute_collation_for_occupied_cores_with_async_backing_enabled_and_elastic_scaling( + #[case] candidates_pending_avail: u32, +) { + let activated_hash: Hash = [1; 32].into(); + let para_id = ParaId::from(5); + + let cores = (0..candidates_pending_avail) + .into_iter() + .map(|idx| { + CoreState::Occupied(polkadot_primitives::OccupiedCore { + next_up_on_available: Some(ScheduledCore { para_id, collator: None }), + occupied_since: 0, + time_out_at: 10, + next_up_on_time_out: Some(ScheduledCore { para_id, collator: None }), + availability: Default::default(), // doesn't matter + group_responsible: polkadot_primitives::GroupIndex(idx as u32), + candidate_hash: Default::default(), + candidate_descriptor: dummy_candidate_descriptor(dummy_hash()), + }) + }) + .collect::<Vec<_>>(); + + let pending_availability = (0..candidates_pending_avail) + .into_iter() + .map(|_idx| dummy_candidate_pending_availability(para_id, activated_hash, 0)) + .collect::<Vec<_>>(); + + let claim_queue = cores + .iter() + .enumerate() + .map(|(idx, _core)| (CoreIndex::from(idx as u32), VecDeque::from([para_id]))) + .collect::<BTreeMap<_, _>>(); + let total_cores = cores.len(); + + test_harness(|mut virtual_overseer| async move { + helpers::initialize_collator(&mut virtual_overseer, para_id).await; + helpers::activate_new_head(&mut virtual_overseer, activated_hash).await; + helpers::handle_runtime_calls_on_new_head_activation( + &mut virtual_overseer, + activated_hash, + AsyncBackingParams { max_candidate_depth: 1, allowed_ancestry_len: 1 }, + cores, + // Using latest runtime with the fancy claim queue exposed. + RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT, + claim_queue, + pending_availability, ) .await; + helpers::handle_core_processing_for_a_leaf( &mut virtual_overseer, activated_hash, para_id, // `CoreState` is `Occupied` => `OccupiedCoreAssumption` is `Included` OccupiedCoreAssumption::Included, + total_cores, + ) + .await; + + virtual_overseer + }); +} + +// There is a variable number of cores in `Free` state and async backing is enabled. +// On new head activation `CollationGeneration` should produce and distribute a new collation +// with the proper assumption about the para candidate chain availability at the next block. +#[rstest] +#[case(0)] +#[case(1)] +#[case(2)] +fn distribute_collation_for_free_cores_with_async_backing_enabled_and_elastic_scaling( + #[case] candidates_pending_avail: u32, +) { + let activated_hash: Hash = [1; 32].into(); + let para_id = ParaId::from(5); + + let cores = (0..candidates_pending_avail) + .into_iter() + .map(|_idx| CoreState::Scheduled(ScheduledCore { para_id, collator: None })) + .collect::<Vec<_>>(); + + let claim_queue = cores + .iter() + .enumerate() + .map(|(idx, _core)| (CoreIndex::from(idx as u32), VecDeque::from([para_id]))) + .collect::<BTreeMap<_, _>>(); + let total_cores = cores.len(); + + test_harness(|mut virtual_overseer| async move { + helpers::initialize_collator(&mut virtual_overseer, para_id).await; + helpers::activate_new_head(&mut virtual_overseer, activated_hash).await; + helpers::handle_runtime_calls_on_new_head_activation( + &mut virtual_overseer, + activated_hash, + AsyncBackingParams { max_candidate_depth: 1, allowed_ancestry_len: 1 }, + cores, + // Using latest runtime with the fancy claim queue exposed.
+ RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT, + claim_queue, + vec![], + ) + .await; + + helpers::handle_core_processing_for_a_leaf( + &mut virtual_overseer, + activated_hash, + para_id, + // `CoreState` is `Free` => `OccupiedCoreAssumption` is `Free` + OccupiedCoreAssumption::Free, + total_cores, ) .await; @@ -777,6 +970,7 @@ fn no_collation_is_distributed_for_occupied_core_with_async_backing_disabled( cores, runtime_version, claim_queue, + vec![], ) .await; @@ -785,8 +979,38 @@ fn no_collation_is_distributed_for_occupied_core_with_async_backing_disabled( } mod helpers { + use polkadot_primitives::{ + async_backing::{Constraints, InboundHrmpLimitations}, + BlockNumber, + }; + use super::*; + // A set of dummy constraints for `ParaBackingState` + pub(crate) fn dummy_constraints( + min_relay_parent_number: BlockNumber, + valid_watermarks: Vec<BlockNumber>, + required_parent: HeadData, + validation_code_hash: ValidationCodeHash, + ) -> Constraints { + Constraints { + min_relay_parent_number, + max_pov_size: 5 * 1024 * 1024, + max_code_size: 1_000_000, + ump_remaining: 10, + ump_remaining_bytes: 1_000, + max_ump_num_per_candidate: 10, + dmp_remaining_messages: vec![], + hrmp_inbound: InboundHrmpLimitations { valid_watermarks }, + hrmp_channels_out: vec![], + max_hrmp_num_per_candidate: 0, + required_parent, + validation_code_hash, + upgrade_restriction: None, + future_validation_code: None, + } + } + // Sends `Initialize` with a collator config pub async fn initialize_collator(virtual_overseer: &mut VirtualOverseer, para_id: ParaId) { virtual_overseer @@ -822,7 +1046,8 @@ mod helpers { async_backing_params: AsyncBackingParams, cores: Vec<CoreState>, runtime_version: u32, - claim_queue: BTreeMap<CoreIndex, VecDeque<ParaId>>, + claim_queue: BTreeMap<CoreIndex, VecDeque<ParaId>>, + pending_availability: Vec<CandidatePendingAvailability>, ) { assert_matches!( overseer_recv(virtual_overseer).await, @@ -857,6 +1082,25 @@ mod helpers { } ); + // Process the `ParaBackingState` message and return some dummy state.
+ let message = overseer_recv(virtual_overseer).await; + let para_id = match message { + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::ParaBackingState(p_id, _), + )) => p_id, + _ => panic!("received unexpected message {:?}", message), + }; + + assert_matches!( + message, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ParaBackingState(p_id, tx)) + ) if parent == activated_hash && p_id == para_id => { + tx.send(Ok(Some(dummy_backing_state(pending_availability)))).unwrap(); + } + ); + assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( @@ -889,7 +1133,14 @@ mod helpers { activated_hash: Hash, para_id: ParaId, expected_occupied_core_assumption: OccupiedCoreAssumption, + cores_assigned: usize, ) { + // Expect no messages if no core is assigned to the para + if cores_assigned == 0 { + assert!(overseer_recv(virtual_overseer).timeout(TIMEOUT / 2).await.is_none()); + return + } + // Some hardcoded data - if needed, extract to parameters let validation_code_hash = ValidationCodeHash::from(Hash::repeat_byte(42)); let parent_head = HeadData::from(vec![1, 2, 3]); diff --git a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs index 9f306f288a1..e6aa55235b7 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs @@ -203,20 +203,40 @@ struct PeerData { version: CollationVersion, } +/// A type wrapping a collation and its designated core index. +struct CollationWithCoreIndex(Collation, CoreIndex); + +impl CollationWithCoreIndex { + /// Returns inner collation ref. + pub fn collation(&self) -> &Collation { + &self.0 + } + + /// Returns inner collation mut ref. + pub fn collation_mut(&mut self) -> &mut Collation { + &mut self.0 + } + + /// Returns inner core index. + pub fn core_index(&self) -> &CoreIndex { + &self.1 + } +} + struct PerRelayParent { prospective_parachains_mode: ProspectiveParachainsMode, - /// Validators group responsible for backing candidates built + /// Per-core-index validator groups responsible for backing candidates built /// on top of this relay parent. - validator_group: ValidatorGroup, + validator_group: HashMap<CoreIndex, ValidatorGroup>, /// Distributed collations. - collations: HashMap<CandidateHash, Collation>, + collations: HashMap<CandidateHash, CollationWithCoreIndex>, } impl PerRelayParent { fn new(mode: ProspectiveParachainsMode) -> Self { Self { prospective_parachains_mode: mode, - validator_group: ValidatorGroup::default(), + validator_group: HashMap::default(), collations: HashMap::new(), } } @@ -350,6 +370,7 @@ async fn distribute_collation( pov: PoV, parent_head_data: HeadData, result_sender: Option<oneshot::Sender<CollationSecondedSignal>>, + core_index: CoreIndex, ) -> Result<()> { let candidate_relay_parent = receipt.descriptor.relay_parent; let candidate_hash = receipt.hash(); @@ -422,7 +443,22 @@ async fn distribute_collation( ); } - let our_core = our_cores[0]; + // Double check that the specified `core_index` is among the ones our para has assignments for. + if !our_cores.iter().any(|assigned_core| assigned_core == &core_index) { + gum::warn!( + target: LOG_TARGET, + para_id = %id, + relay_parent = ?candidate_relay_parent, + cores = ?our_cores, + ?core_index, + "Attempting to distribute collation for a core we are not assigned to", + ); + + return Ok(()) + } + + let our_core = core_index; + // Determine the group on that core.
// // When prospective parachains are disabled, candidate relay parent here is @@ -464,10 +500,12 @@ async fn distribute_collation( "Accepted collation, connecting to validators." ); - let validators_at_relay_parent = &mut per_relay_parent.validator_group.validators; - if validators_at_relay_parent.is_empty() { - *validators_at_relay_parent = validators; - } + // Insert validator group for the `core_index` at relay parent. + per_relay_parent.validator_group.entry(core_index).or_insert_with(|| { + let mut group = ValidatorGroup::default(); + group.validators = validators; + group + }); // Update a set of connected validators if necessary. connect_to_validators(ctx, &state.validator_groups_buf).await; @@ -484,7 +522,10 @@ async fn distribute_collation( per_relay_parent.collations.insert( candidate_hash, - Collation { receipt, pov, parent_head_data, status: CollationStatus::Created }, + CollationWithCoreIndex( + Collation { receipt, pov, parent_head_data, status: CollationStatus::Created }, + core_index, + ), ); // If prospective parachains are disabled, a leaf should be known to peer. @@ -690,7 +731,10 @@ async fn advertise_collation( advertisement_timeouts: &mut FuturesUnordered, metrics: &Metrics, ) { - for (candidate_hash, collation) in per_relay_parent.collations.iter_mut() { + for (candidate_hash, collation_and_core) in per_relay_parent.collations.iter_mut() { + let core_index = *collation_and_core.core_index(); + let collation = collation_and_core.collation_mut(); + // Check that peer will be able to request the collation. if let CollationVersion::V1 = protocol_version { if per_relay_parent.prospective_parachains_mode.is_enabled() { @@ -704,11 +748,17 @@ async fn advertise_collation( } } - let should_advertise = - per_relay_parent - .validator_group - .should_advertise_to(candidate_hash, peer_ids, &peer); + let Some(validator_group) = per_relay_parent.validator_group.get_mut(&core_index) else { + gum::debug!( + target: LOG_TARGET, + ?relay_parent, + ?core_index, + "Skipping advertising to validator, validator group for core not found", + ); + return + }; + let should_advertise = validator_group.should_advertise_to(candidate_hash, peer_ids, &peer); match should_advertise { ShouldAdvertiseTo::Yes => {}, ShouldAdvertiseTo::NotAuthority | ShouldAdvertiseTo::AlreadyAdvertised => { @@ -756,9 +806,7 @@ async fn advertise_collation( )) .await; - per_relay_parent - .validator_group - .advertised_to_peer(candidate_hash, &peer_ids, peer); + validator_group.advertised_to_peer(candidate_hash, &peer_ids, peer); advertisement_timeouts.push(ResetInterestTimeout::new( *candidate_hash, @@ -790,6 +838,7 @@ async fn process_msg( pov, parent_head_data, result_sender, + core_index, } => { let _span1 = state .span_per_relay_parent @@ -820,6 +869,7 @@ async fn process_msg( pov, parent_head_data, result_sender, + core_index, ) .await?; }, @@ -1053,7 +1103,7 @@ async fn handle_incoming_request( }; let mode = per_relay_parent.prospective_parachains_mode; - let collation = match &req { + let collation_with_core = match &req { VersionedCollationRequest::V1(_) if !mode.is_enabled() => per_relay_parent.collations.values_mut().next(), VersionedCollationRequest::V2(req) => @@ -1070,22 +1120,24 @@ async fn handle_incoming_request( return Ok(()) }, }; - let (receipt, pov, parent_head_data) = if let Some(collation) = collation { - collation.status.advance_to_requested(); - ( - collation.receipt.clone(), - collation.pov.clone(), - collation.parent_head_data.clone(), - ) - } else { - gum::warn!( - target: LOG_TARGET, - 
relay_parent = %relay_parent, - "received a `RequestCollation` for a relay parent we don't have collation stored.", - ); + let (receipt, pov, parent_head_data) = + if let Some(collation_with_core) = collation_with_core { + let collation = collation_with_core.collation_mut(); + collation.status.advance_to_requested(); + ( + collation.receipt.clone(), + collation.pov.clone(), + collation.parent_head_data.clone(), + ) + } else { + gum::warn!( + target: LOG_TARGET, + relay_parent = %relay_parent, + "received a `RequestCollation` for a relay parent we don't have collation stored.", + ); - return Ok(()) - }; + return Ok(()) + }; state.metrics.on_collation_sent_requested(); @@ -1340,7 +1392,9 @@ where .remove(removed) .map(|per_relay_parent| per_relay_parent.collations) .unwrap_or_default(); - for collation in collations.into_values() { + for collation_with_core in collations.into_values() { + let collation = collation_with_core.collation(); + let candidate_hash = collation.receipt.hash(); state.collation_result_senders.remove(&candidate_hash); state.validator_groups_buf.remove_candidate(&candidate_hash); @@ -1477,7 +1531,7 @@ async fn run_inner( continue }; - let next_collation = { + let next_collation_with_core = { let per_relay_parent = match state.per_relay_parent.get(&relay_parent) { Some(per_relay_parent) => per_relay_parent, None => continue, @@ -1497,7 +1551,8 @@ async fn run_inner( } }; - if let Some(collation) = next_collation { + if let Some(collation_with_core) = next_collation_with_core { + let collation = collation_with_core.collation(); let receipt = collation.receipt.clone(); let pov = collation.pov.clone(); let parent_head_data = collation.parent_head_data.clone(); diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs index 38e6780eb7d..bcf0b34e631 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs @@ -377,6 +377,7 @@ async fn distribute_collation_with_receipt( pov: pov.clone(), parent_head_data: HeadData(vec![1, 2, 3]), result_sender: None, + core_index: CoreIndex(0), }, ) .await; diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs index e419cd5444f..70705354563 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs @@ -277,6 +277,7 @@ fn distribute_collation_from_implicit_view() { pov: pov.clone(), parent_head_data: HeadData(vec![1, 2, 3]), result_sender: None, + core_index: CoreIndex(0), }, ) .await; @@ -358,6 +359,7 @@ fn distribute_collation_up_to_limit() { pov: pov.clone(), parent_head_data: HeadData(vec![1, 2, 3]), result_sender: None, + core_index: CoreIndex(0), }, ) .await; diff --git a/polkadot/node/network/collator-protocol/src/collator_side/validators_buffer.rs b/polkadot/node/network/collator-protocol/src/collator_side/validators_buffer.rs index 1533f2eda5a..fbb3ff4328a 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/validators_buffer.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/validators_buffer.rs @@ -45,14 +45,22 @@ use futures::FutureExt; use polkadot_node_network_protocol::PeerId; use polkadot_primitives::{AuthorityDiscoveryId, 
CandidateHash, GroupIndex, SessionIndex}; +/// Elastic scaling: how many candidates per relay chain block the collator supports building. +pub const MAX_CHAINED_CANDIDATES_PER_RCB: NonZeroUsize = match NonZeroUsize::new(3) { + Some(cap) => cap, + None => panic!("max candidates per rcb cannot be zero"), +}; + /// The ring buffer stores at most this many unique validator groups. /// /// This value should be chosen in way that all groups assigned to our para -/// in the view can fit into the buffer. -pub const VALIDATORS_BUFFER_CAPACITY: NonZeroUsize = match NonZeroUsize::new(3) { - Some(cap) => cap, - None => panic!("buffer capacity must be non-zero"), -}; +/// in the view can fit into the buffer multiplied by amount of candidates we support per relay +/// chain block in the case of elastic scaling. +pub const VALIDATORS_BUFFER_CAPACITY: NonZeroUsize = + match NonZeroUsize::new(3 * MAX_CHAINED_CANDIDATES_PER_RCB.get()) { + Some(cap) => cap, + None => panic!("buffer capacity must be non-zero"), + }; /// Unique identifier of a validators group. #[derive(Debug)] diff --git a/polkadot/node/primitives/src/lib.rs b/polkadot/node/primitives/src/lib.rs index b102cf06c38..b127d87d4ea 100644 --- a/polkadot/node/primitives/src/lib.rs +++ b/polkadot/node/primitives/src/lib.rs @@ -31,8 +31,8 @@ use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use polkadot_primitives::{ BlakeTwo256, BlockNumber, CandidateCommitments, CandidateHash, CollatorPair, - CommittedCandidateReceipt, CompactStatement, EncodeAs, Hash, HashT, HeadData, Id as ParaId, - PersistedValidationData, SessionIndex, Signed, UncheckedSigned, ValidationCode, + CommittedCandidateReceipt, CompactStatement, CoreIndex, EncodeAs, Hash, HashT, HeadData, + Id as ParaId, PersistedValidationData, SessionIndex, Signed, UncheckedSigned, ValidationCode, ValidationCodeHash, ValidatorIndex, MAX_CODE_SIZE, MAX_POV_SIZE, }; pub use sp_consensus_babe::{ @@ -524,6 +524,8 @@ pub struct SubmitCollationParams { /// okay to just drop it. However, if it is called, it should be called with the signed /// statement of a parachain validator seconding the collation. pub result_sender: Option>, + /// The core index on which the resulting candidate should be backed + pub core_index: CoreIndex, } /// This is the data we keep available for each candidate included in the relay chain. diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index 5d05d2b56ed..d84b0b6dd14 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -228,6 +228,8 @@ pub enum CollatorProtocolMessage { /// The result sender should be informed when at least one parachain validator seconded the /// collation. It is also completely okay to just drop the sender. result_sender: Option>, + /// The core index where the candidate should be backed. + core_index: CoreIndex, }, /// Report a collator as having provided an invalid collation. This should lead to disconnect /// and blacklist of the collator. 
diff --git a/polkadot/node/subsystem-util/src/lib.rs b/polkadot/node/subsystem-util/src/lib.rs index 6ff09ed5f22..83b046f0bf0 100644 --- a/polkadot/node/subsystem-util/src/lib.rs +++ b/polkadot/node/subsystem-util/src/lib.rs @@ -30,7 +30,7 @@ use polkadot_node_subsystem::{ messages::{RuntimeApiMessage, RuntimeApiRequest, RuntimeApiSender}, overseer, SubsystemSender, }; -use polkadot_primitives::{slashing, CoreIndex, ExecutorParams}; +use polkadot_primitives::{async_backing::BackingState, slashing, CoreIndex, ExecutorParams}; pub use overseer::{ gen::{OrchestraError as OverseerError, Timeout}, @@ -308,6 +308,7 @@ specialize_requests! { fn request_disabled_validators() -> Vec; DisabledValidators; fn request_async_backing_params() -> AsyncBackingParams; AsyncBackingParams; fn request_claim_queue() -> BTreeMap>; ClaimQueue; + fn request_para_backing_state(para_id: ParaId) -> Option; ParaBackingState; } /// Requests executor parameters from the runtime effective at given relay-parent. First obtains diff --git a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml index cb283c27119..30bce806f9f 100644 --- a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml @@ -24,7 +24,7 @@ log = { workspace = true, default-features = true } test-parachain-adder = { path = ".." } polkadot-primitives = { path = "../../../../primitives" } polkadot-cli = { path = "../../../../cli" } -polkadot-service = { path = "../../../../node/service", features = ["rococo-native"] } +polkadot-service = { path = "../../../../node/service", features = ["elastic-scaling-experimental", "rococo-native"] } polkadot-node-primitives = { path = "../../../../node/primitives" } polkadot-node-subsystem = { path = "../../../../node/subsystem" } diff --git a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml index 238b98a6680..bede10a7673 100644 --- a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml @@ -24,7 +24,7 @@ log = { workspace = true, default-features = true } test-parachain-undying = { path = ".." 
} polkadot-primitives = { path = "../../../../primitives" } polkadot-cli = { path = "../../../../cli" } -polkadot-service = { path = "../../../../node/service", features = ["rococo-native"] } +polkadot-service = { path = "../../../../node/service", features = ["elastic-scaling-experimental", "rococo-native"] } polkadot-node-primitives = { path = "../../../../node/primitives" } polkadot-node-subsystem = { path = "../../../../node/subsystem" } diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 8a3cd9309db..62c3741c56d 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -27,10 +27,11 @@ use sp_std::{collections::btree_map::BTreeMap, prelude::*}; use polkadot_runtime_parachains::{ assigner_parachains as parachains_assigner_parachains, configuration as parachains_configuration, disputes as parachains_disputes, - disputes::slashing as parachains_slashing, dmp as parachains_dmp, hrmp as parachains_hrmp, - inclusion as parachains_inclusion, initializer as parachains_initializer, - origin as parachains_origin, paras as parachains_paras, - paras_inherent as parachains_paras_inherent, runtime_api_impl::v7 as runtime_impl, + disputes::slashing as parachains_slashing, + dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion, + initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, + paras_inherent as parachains_paras_inherent, + runtime_api_impl::{v7 as runtime_impl, vstaging as staging_runtime_impl}, scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; @@ -829,6 +830,7 @@ sp_api::impl_runtime_apis! { } } + #[api_version(10)] impl primitives::runtime_api::ParachainHost<Block> for Runtime { fn validators() -> Vec<ValidatorId> { runtime_impl::validators::<Runtime>() @@ -956,6 +958,30 @@ sp_api::impl_runtime_apis! { key_ownership_proof, ) } + + fn minimum_backing_votes() -> u32 { + runtime_impl::minimum_backing_votes::<Runtime>() + } + + fn para_backing_state(para_id: ParaId) -> Option<primitives::async_backing::BackingState> { + runtime_impl::backing_state::<Runtime>(para_id) + } + + fn async_backing_params() -> primitives::AsyncBackingParams { + runtime_impl::async_backing_params::<Runtime>() + } + + fn approval_voting_params() -> primitives::vstaging::ApprovalVotingParams { + staging_runtime_impl::approval_voting_params::<Runtime>() + } + + fn disabled_validators() -> Vec<ValidatorIndex> { + staging_runtime_impl::disabled_validators::<Runtime>() + } + + fn node_features() -> primitives::vstaging::NodeFeatures { + staging_runtime_impl::node_features::<Runtime>() + } } impl beefy_primitives::BeefyApi<Block, BeefyId> for Runtime { diff --git a/prdoc/pr_3795.prdoc b/prdoc/pr_3795.prdoc new file mode 100644 index 00000000000..da01fcbec82 --- /dev/null +++ b/prdoc/pr_3795.prdoc @@ -0,0 +1,14 @@ +title: Enable collators to build on multiple cores + +doc: + - audience: Node Dev + description: | + Introduces a `CoreIndex` parameter in `SubmitCollationParams`. This enables + collators to make use of the potentially multiple cores assigned to their para + at some relay chain block. This extra parameter is used by the collator protocol + and collation generation subsystems to forward the collation to the appropriate backing group.
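+
+      For illustration only: a minimal, hypothetical sketch of pinning a collation to
+      a core via the new field. All field values are placeholders, and an
+      `overseer_handle` is assumed to be available in the collator's context:
+
+      ```rust
+      let params = SubmitCollationParams {
+          relay_parent,
+          collation,
+          parent_head,
+          validation_code_hash,
+          result_sender: None,
+          // New in this PR: the core on which the candidate should be backed.
+          core_index: CoreIndex(0),
+      };
+      overseer_handle
+          .send_msg(CollationGenerationMessage::SubmitCollation(params), "collator")
+          .await;
+      ```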
+ +crates: +- name: polkadot-node-collation-generation +- name: polkadot-collator-protocol + bump: minor -- GitLab From 8342947b8e22d398ab981d33d9d48756a3160f77 Mon Sep 17 00:00:00 2001 From: Ermal Kaleci Date: Wed, 27 Mar 2024 15:51:45 +0100 Subject: [PATCH 044/128] process enqueued messages on idle (#3844) This will make it possible to use remaining weight on idle for processing enqueued messages. More context here https://github.com/paritytech/polkadot-sdk/issues/3709 --------- Co-authored-by: Adrian Catangiu --- .../pallets/outbound-queue/src/mock.rs | 1 + bridges/snowbridge/pallets/system/src/mock.rs | 1 + cumulus/pallets/parachain-system/src/mock.rs | 1 + .../assets/asset-hub-rococo/src/lib.rs | 1 + .../assets/asset-hub-westend/src/lib.rs | 1 + .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 1 + .../bridge-hubs/bridge-hub-westend/src/lib.rs | 1 + .../collectives-westend/src/lib.rs | 1 + .../contracts/contracts-rococo/src/lib.rs | 1 + .../coretime/coretime-rococo/src/lib.rs | 1 + .../coretime/coretime-westend/src/lib.rs | 1 + .../glutton/glutton-westend/src/lib.rs | 1 + .../runtimes/people/people-rococo/src/lib.rs | 1 + .../runtimes/people/people-westend/src/lib.rs | 1 + .../runtimes/starters/shell/src/lib.rs | 1 + .../runtimes/testing/penpal/src/lib.rs | 1 + .../testing/rococo-parachain/src/lib.rs | 1 + polkadot/runtime/parachains/src/mock.rs | 1 + polkadot/runtime/rococo/src/lib.rs | 1 + polkadot/runtime/westend/src/lib.rs | 1 + .../xcm-simulator/example/src/relay_chain.rs | 1 + .../xcm-simulator/fuzzer/src/relay_chain.rs | 1 + prdoc/pr_3844.prdoc | 25 +++++++++++ substrate/bin/node/runtime/src/lib.rs | 1 + .../contracts/mock-network/src/relay_chain.rs | 1 + .../message-queue/src/integration_test.rs | 1 + substrate/frame/message-queue/src/lib.rs | 22 +++++++++- substrate/frame/message-queue/src/mock.rs | 1 + substrate/frame/message-queue/src/tests.rs | 42 +++++++++++++++++++ templates/parachain/runtime/src/lib.rs | 1 + 30 files changed, 114 insertions(+), 2 deletions(-) create mode 100644 prdoc/pr_3844.prdoc diff --git a/bridges/snowbridge/pallets/outbound-queue/src/mock.rs b/bridges/snowbridge/pallets/outbound-queue/src/mock.rs index 67877a05c79..5eeeeead140 100644 --- a/bridges/snowbridge/pallets/outbound-queue/src/mock.rs +++ b/bridges/snowbridge/pallets/outbound-queue/src/mock.rs @@ -69,6 +69,7 @@ impl pallet_message_queue::Config for Test { type HeapSize = HeapSize; type MaxStale = MaxStale; type ServiceWeight = ServiceWeight; + type IdleMaxServiceWeight = (); type QueuePausedQuery = (); } diff --git a/bridges/snowbridge/pallets/system/src/mock.rs b/bridges/snowbridge/pallets/system/src/mock.rs index 0312456c982..687072a49e2 100644 --- a/bridges/snowbridge/pallets/system/src/mock.rs +++ b/bridges/snowbridge/pallets/system/src/mock.rs @@ -148,6 +148,7 @@ impl pallet_message_queue::Config for Test { type HeapSize = HeapSize; type MaxStale = MaxStale; type ServiceWeight = ServiceWeight; + type IdleMaxServiceWeight = (); type QueuePausedQuery = (); } diff --git a/cumulus/pallets/parachain-system/src/mock.rs b/cumulus/pallets/parachain-system/src/mock.rs index 0b1d536ba7c..fe89dfe68c6 100644 --- a/cumulus/pallets/parachain-system/src/mock.rs +++ b/cumulus/pallets/parachain-system/src/mock.rs @@ -125,6 +125,7 @@ impl pallet_message_queue::Config for Test { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MaxWeight; + type IdleMaxServiceWeight = (); type WeightInfo = (); } diff --git 
a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 689d8d56c48..293416ab2a9 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -660,6 +660,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; } impl parachain_info::Config for Runtime {} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 48106b5f302..e92e801e9f5 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -641,6 +641,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; } impl cumulus_pallet_aura_ext::Config for Runtime {} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 3980fa0d501..f0aa4f8e91c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -387,6 +387,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; } impl cumulus_pallet_aura_ext::Config for Runtime {} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 9bdea6b9a7d..3b759301d0e 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -348,6 +348,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; } impl cumulus_pallet_aura_ext::Config for Runtime {} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index d3f588bf25f..e1c2e1a6237 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -423,6 +423,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; } impl cumulus_pallet_aura_ext::Config for Runtime {} diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index e1586c7d9b2..ec0a5f6fc96 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ 
b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -318,6 +318,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; } impl cumulus_pallet_aura_ext::Config for Runtime {} diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index 86eb5cdfcaf..67f48689353 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -301,6 +301,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; } impl parachain_info::Config for Runtime {} diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index c31e474cc2f..609ea5a38a8 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -301,6 +301,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; } impl parachain_info::Config for Runtime {} diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index cee17cdc7b0..ca1a915ba74 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -212,6 +212,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; } impl parachain_info::Config for Runtime {} diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index cd5f1ad3272..7c9427a2493 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -282,6 +282,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; type WeightInfo = weights::pallet_message_queue::WeightInfo; } diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index e840a40f5ac..3e331e5e8eb 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -282,6 +282,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; type WeightInfo = weights::pallet_message_queue::WeightInfo; } diff --git 
a/cumulus/parachains/runtimes/starters/shell/src/lib.rs b/cumulus/parachains/runtimes/starters/shell/src/lib.rs index 0f4957fd802..ad79d6849bd 100644 --- a/cumulus/parachains/runtimes/starters/shell/src/lib.rs +++ b/cumulus/parachains/runtimes/starters/shell/src/lib.rs @@ -232,6 +232,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; } impl cumulus_pallet_aura_ext::Config for Runtime {} diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 1d404feac3d..0a55d2dcfe5 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -540,6 +540,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; } impl cumulus_pallet_aura_ext::Config for Runtime {} diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index c6006141981..034d16267d4 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -320,6 +320,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = (); } impl cumulus_pallet_aura_ext::Config for Runtime {} diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index 7ed62a392e4..461b9f4b431 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -365,6 +365,7 @@ impl pallet_message_queue::Config for Test { type HeapSize = ConstU32<65536>; type MaxStale = ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = (); } parameter_types! 
{ diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 8c8abe97ede..c41ffdbe72d 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -987,6 +987,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = MessageQueueHeapSize; type MaxStale = MessageQueueMaxStale; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; #[cfg(not(feature = "runtime-benchmarks"))] type MessageProcessor = MessageProcessor; #[cfg(feature = "runtime-benchmarks")] diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 02397f35368..e6381513170 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1190,6 +1190,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = MessageQueueHeapSize; type MaxStale = MessageQueueMaxStale; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; #[cfg(not(feature = "runtime-benchmarks"))] type MessageProcessor = MessageProcessor; #[cfg(feature = "runtime-benchmarks")] diff --git a/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs b/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs index 377c77f30a4..286d0038e18 100644 --- a/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs +++ b/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs @@ -275,6 +275,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = MessageQueueHeapSize; type MaxStale = MessageQueueMaxStale; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = (); type MessageProcessor = MessageProcessor; type QueueChangeHandler = (); type QueuePausedQuery = (); diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs index 3224df66cbe..6790b535d16 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs +++ b/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs @@ -232,6 +232,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = MessageQueueHeapSize; type MaxStale = MessageQueueMaxStale; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = (); #[cfg(not(feature = "runtime-benchmarks"))] type MessageProcessor = MessageProcessor; #[cfg(feature = "runtime-benchmarks")] diff --git a/prdoc/pr_3844.prdoc b/prdoc/pr_3844.prdoc new file mode 100644 index 00000000000..a92092f91b2 --- /dev/null +++ b/prdoc/pr_3844.prdoc @@ -0,0 +1,25 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Add the ability for MessageQueue to process enqueued messages on idle + +doc: + - audience: Runtime Dev + description: | + Add the option to use remaining weight on idle for processing enqueued messages. + This increases the chances that messages enqueued during inherent extrinsics are processed in the same block. + A new config type is added to the message-queue `Config` trait: + - `IdleMaxServiceWeight` + + example: + ```rust + parameter_types!
{ + // The maximum weight to be used from remaining weight for processing enqueued messages on idle + pub const IdleMaxServiceWeight: Option<Weight> = Some(Weight::from_parts(..., ...)); + } + + type IdleMaxServiceWeight = IdleMaxServiceWeight; // or `()` to not use this feature + ``` + +crates: + - name: pallet-message-queue diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index ca7e14f6eb1..a9606ac0bb7 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -1303,6 +1303,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = ConstU32<{ 64 * 1024 }>; type MaxStale = ConstU32<128>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = (); } parameter_types! { diff --git a/substrate/frame/contracts/mock-network/src/relay_chain.rs b/substrate/frame/contracts/mock-network/src/relay_chain.rs index e2a8d3d1337..470304ed357 100644 --- a/substrate/frame/contracts/mock-network/src/relay_chain.rs +++ b/substrate/frame/contracts/mock-network/src/relay_chain.rs @@ -225,6 +225,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = MessageQueueHeapSize; type MaxStale = MessageQueueMaxStale; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = (); type MessageProcessor = MessageProcessor; type QueueChangeHandler = (); type WeightInfo = (); diff --git a/substrate/frame/message-queue/src/integration_test.rs b/substrate/frame/message-queue/src/integration_test.rs index 26a330cc88e..14b8d2217eb 100644 --- a/substrate/frame/message-queue/src/integration_test.rs +++ b/substrate/frame/message-queue/src/integration_test.rs @@ -73,6 +73,7 @@ impl Config for Test { type HeapSize = HeapSize; type MaxStale = MaxStale; type ServiceWeight = ServiceWeight; + type IdleMaxServiceWeight = (); } /// Simulates heavy usage by enqueueing and processing large amounts of messages. diff --git a/substrate/frame/message-queue/src/lib.rs b/substrate/frame/message-queue/src/lib.rs index 93cd760eeb9..ec85c785f79 100644 --- a/substrate/frame/message-queue/src/lib.rs +++ b/substrate/frame/message-queue/src/lib.rs @@ -525,12 +525,21 @@ pub mod pallet { type MaxStale: Get<u32>; /// The amount of weight (if any) which should be provided to the message queue for - servicing enqueued items. + servicing enqueued items `on_initialize`. /// /// This may be legitimately `None` in the case that you will call - `ServiceQueues::service_queues` manually. + `ServiceQueues::service_queues` manually or set [`Self::IdleMaxServiceWeight`] to have + it run in `on_idle`. #[pallet::constant] type ServiceWeight: Get<Option<Weight>>; + + /// The maximum amount of weight (if any) to be used from the remaining weight in `on_idle`, + /// which should be provided to the message queue for servicing enqueued items. + /// Useful for parachains to process messages in the same block they are received. + /// + /// If `None`, it will not call `ServiceQueues::service_queues` in `on_idle`. + #[pallet::constant] + type IdleMaxServiceWeight: Get<Option<Weight>>; } #[pallet::event] @@ -643,6 +652,15 @@ pub mod pallet { } } + fn on_idle(_n: BlockNumberFor<T>, remaining_weight: Weight) -> Weight { + if let Some(weight_limit) = T::IdleMaxServiceWeight::get() { + // Make use of the remaining weight to process enqueued messages.
+ Self::service_queues(weight_limit.min(remaining_weight)) + } else { + Weight::zero() + } + } + #[cfg(feature = "try-runtime")] fn try_state(_: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { Self::do_try_state() diff --git a/substrate/frame/message-queue/src/mock.rs b/substrate/frame/message-queue/src/mock.rs index f22f318b8ef..1281de6b0a6 100644 --- a/substrate/frame/message-queue/src/mock.rs +++ b/substrate/frame/message-queue/src/mock.rs @@ -56,6 +56,7 @@ impl Config for Test { type HeapSize = HeapSize; type MaxStale = MaxStale; type ServiceWeight = ServiceWeight; + type IdleMaxServiceWeight = ServiceWeight; } /// Mocked `WeightInfo` impl with allows to set the weight per call. diff --git a/substrate/frame/message-queue/src/tests.rs b/substrate/frame/message-queue/src/tests.rs index 1f6e7777f01..d6788847d57 100644 --- a/substrate/frame/message-queue/src/tests.rs +++ b/substrate/frame/message-queue/src/tests.rs @@ -1838,3 +1838,45 @@ fn with_service_mutex_works() { with_service_mutex(|| called = 3).unwrap(); assert_eq!(called, 3); } + +#[test] +fn process_enqueued_on_idle() { + use MessageOrigin::*; + build_and_execute::(|| { + // Some messages enqueued on previous block. + MessageQueue::enqueue_messages(vec![msg("a"), msg("ab"), msg("abc")].into_iter(), Here); + assert_eq!(BookStateFor::::iter().count(), 1); + + // Process enqueued messages from previous block. + Pallet::::on_initialize(1); + assert_eq!( + MessagesProcessed::take(), + vec![(b"a".to_vec(), Here), (b"ab".to_vec(), Here), (b"abc".to_vec(), Here),] + ); + + MessageQueue::enqueue_messages(vec![msg("x"), msg("xy"), msg("xyz")].into_iter(), There); + assert_eq!(BookStateFor::::iter().count(), 2); + + // Enough weight to process on idle. + Pallet::::on_idle(1, Weight::from_parts(100, 100)); + assert_eq!( + MessagesProcessed::take(), + vec![(b"x".to_vec(), There), (b"xy".to_vec(), There), (b"xyz".to_vec(), There)] + ); + }) +} + +#[test] +fn process_enqueued_on_idle_requires_enough_weight() { + use MessageOrigin::*; + build_and_execute::(|| { + Pallet::::on_initialize(1); + + MessageQueue::enqueue_messages(vec![msg("x"), msg("xy"), msg("xyz")].into_iter(), There); + assert_eq!(BookStateFor::::iter().count(), 1); + + // Not enough weight to process on idle. + Pallet::::on_idle(1, Weight::from_parts(0, 0)); + assert_eq!(MessagesProcessed::take(), vec![]); + }) +} diff --git a/templates/parachain/runtime/src/lib.rs b/templates/parachain/runtime/src/lib.rs index ad21b79a5b1..88b0f5a1474 100644 --- a/templates/parachain/runtime/src/lib.rs +++ b/templates/parachain/runtime/src/lib.rs @@ -414,6 +414,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = (); } impl cumulus_pallet_aura_ext::Config for Runtime {} -- GitLab From 374aefa4f2d3754e5a6e9464f5d1692af9a1c6be Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Wed, 27 Mar 2024 17:03:53 +0100 Subject: [PATCH 045/128] [ci] Fix publish benchmarks job (#3864) Few fixes in CI to publish benchmarks jobs. 
cc https://github.com/paritytech/ci_cd/issues/934 --- .gitlab/pipeline/publish.yml | 4 ++-- .gitlab/pipeline/test.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.gitlab/pipeline/publish.yml b/.gitlab/pipeline/publish.yml index bd9387f3c07..a37ba012a8a 100644 --- a/.gitlab/pipeline/publish.yml +++ b/.gitlab/pipeline/publish.yml @@ -88,7 +88,7 @@ publish-subsystem-benchmarks: - git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*" - git fetch origin gh-pages # Push result to github - - git checkout gh-pages + - git checkout gh-pages --force - mkdir -p bench/gitlab/ || echo "Directory exists" - rm -rf bench/gitlab/*.json || echo "No json files" - cp -r charts/*.json bench/gitlab/ @@ -121,7 +121,7 @@ trigger_workflow: curl -q -X POST \ -H "Accept: application/vnd.github.v3+json" \ -H "Authorization: token $GITHUB_TOKEN" \ - https://api.github.com/repos/paritytech-stg/${CI_PROJECT_NAME}/actions/workflows/subsystem-benchmarks.yml/dispatches \ + https://api.github.com/repos/paritytech/${CI_PROJECT_NAME}/actions/workflows/subsystem-benchmarks.yml/dispatches \ -d '{"ref":"refs/heads/master","inputs":{"benchmark-data-dir-path":"'$benchmark_dir'","output-file-path":"'$bencmark_name'"}}' sleep 300 done diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index d97f9da986c..af261a893da 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -499,7 +499,7 @@ subsystem-regression-tests: stage: test artifacts: name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}" - when: on_success + when: always expire_in: 1 days paths: - charts/ -- GitLab From bbdbeb7ec66e81ff2f571655113b5259198e1611 Mon Sep 17 00:00:00 2001 From: Gonçalo Pestana Date: Wed, 27 Mar 2024 18:20:24 +0100 Subject: [PATCH 046/128] Extrinsic to restore corrupt staking ledgers (#3706) This PR adds a new extrinsic `Call::restore_ledger` gated by the `StakingAdmin` origin that restores a corrupted staking ledger. This extrinsic will be used to recover ledgers that were affected by the issue discussed in https://github.com/paritytech/polkadot-sdk/issues/3245. The extrinsic will re-write the storage items associated with the stash account provided as an input parameter. The data used to reset the ledger can be either i) fetched on-chain or ii) partially/totally set by the input parameters of the call. In order to use on-chain data to restore the staking locks, we need a way to read the current lock in the balances pallet. This PR adds an `InspectLockableCurrency` trait and implements it in the balances pallet. An alternative would be to tightly couple staking with the balances pallet, but that's inelegant (an example of how it would look is in [this branch](https://github.com/paritytech/polkadot-sdk/tree/gpestana/ledger-badstate-clean_tightly)). More details on the types of corruption and the corresponding fixes: https://hackmd.io/DLb5jEYWSmmvqXC9ae4yRg?view#/ We verified that `Call::restore_ledger` does fix all currently corrupted ledgers in Polkadot and Kusama. You can verify it here: https://hackmd.io/v-XNrEoGRpe7APR-EZGhOA. **Changes introduced** - Adds a `Call::restore_ledger` extrinsic to recover a corrupted ledger; - Adds the trait `frame_support::traits::currency::InspectLockableCurrency` to allow external pallets to read current locks given an account and lock ID; - Implements `InspectLockableCurrency` in pallet-balances.
- Adds staking locks try-runtime checks (https://github.com/paritytech/polkadot-sdk/issues/3751) **Todo** - [x] benchmark `Call::restore_ledger` - [x] thorough testing of all ledger recovery cases - [x] consider adding the staking locks try-runtime checks to this PR (https://github.com/paritytech/polkadot-sdk/issues/3751) - [x] simulate restoring all ledgers (https://hackmd.io/Dsa2tvhISNSs7zcqriTaxQ?view) in Polkadot and Kusama using chopsticks -- https://hackmd.io/v-XNrEoGRpe7APR-EZGhOA Related to https://github.com/paritytech/polkadot-sdk/issues/3245 Closes https://github.com/paritytech/polkadot-sdk/issues/3751 --------- Co-authored-by: command-bot <> --- .../westend/src/weights/pallet_staking.rs | 286 +++++---- prdoc/pr_3706.prdoc | 20 + substrate/frame/balances/src/impl_currency.rs | 13 +- .../balances/src/tests/currency_tests.rs | 22 +- substrate/frame/staking/Cargo.toml | 2 +- substrate/frame/staking/src/benchmarking.rs | 9 + substrate/frame/staking/src/lib.rs | 14 + substrate/frame/staking/src/mock.rs | 115 ++-- substrate/frame/staking/src/pallet/impls.rs | 60 +- substrate/frame/staking/src/pallet/mod.rs | 121 +++- substrate/frame/staking/src/tests.rs | 446 +++++++++++++- substrate/frame/staking/src/weights.rs | 577 ++++++++++-------- substrate/frame/support/src/traits.rs | 4 +- .../support/src/traits/tokens/currency.rs | 2 +- .../src/traits/tokens/currency/lockable.rs | 6 + 15 files changed, 1206 insertions(+), 491 deletions(-) create mode 100644 prdoc/pr_3706.prdoc diff --git a/polkadot/runtime/westend/src/weights/pallet_staking.rs b/polkadot/runtime/westend/src/weights/pallet_staking.rs index 7a641e36a12..393fa0b3717 100644 --- a/polkadot/runtime/westend/src/weights/pallet_staking.rs +++ b/polkadot/runtime/westend/src/weights/pallet_staking.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_staking` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-01-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-8idpd4bs-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //!
WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -50,22 +50,22 @@ pub struct WeightInfo(PhantomData); impl pallet_staking::WeightInfo for WeightInfo { /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:0 w:1) - /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::Payee` (r:0 w:1) /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn bond() -> Weight { // Proof Size summary in bytes: - // Measured: `894` + // Measured: `1009` // Estimated: `4764` - // Minimum execution time: 37_340_000 picoseconds. - Weight::from_parts(38_930_000, 0) + // Minimum execution time: 40_585_000 picoseconds. + Weight::from_parts(41_800_000, 0) .saturating_add(Weight::from_parts(0, 4764)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Staking::Bonded` (r:1 w:0) @@ -84,22 +84,22 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1921` // Estimated: `8877` - // Minimum execution time: 80_630_000 picoseconds. - Weight::from_parts(82_196_000, 0) + // Minimum execution time: 81_809_000 picoseconds. + Weight::from_parts(84_387_000, 0) .saturating_add(Weight::from_parts(0, 8877)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(7)) } /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:0) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// Storage: `Staking::MinNominatorBond` (r:1 w:0) /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -112,43 +112,45 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `2128` // Estimated: `8877` - // Minimum execution time: 83_523_000 picoseconds. - Weight::from_parts(86_639_000, 0) + // Minimum execution time: 89_419_000 picoseconds. 
+ Weight::from_parts(91_237_000, 0) .saturating_add(Weight::from_parts(0, 8877)) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(7)) } /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Staking::CurrentEra` (r:1 w:0) - /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:0) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1075` + // Measured: `1223` // Estimated: `4764` - // Minimum execution time: 38_636_000 picoseconds. - Weight::from_parts(40_399_283, 0) + // Minimum execution time: 45_152_000 picoseconds. + Weight::from_parts(46_460_819, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 869 - .saturating_add(Weight::from_parts(37_752, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(5)) + // Standard Error: 972 + .saturating_add(Weight::from_parts(55_473, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::SlashingSpans` (r:1 w:1) /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Staking::Bonded` (r:1 w:1) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -174,11 +176,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `2127 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 81_301_000 picoseconds. - Weight::from_parts(88_609_205, 0) + // Minimum execution time: 82_762_000 picoseconds. 
+ Weight::from_parts(91_035_077, 0) .saturating_add(Weight::from_parts(0, 6248)) - // Standard Error: 3_388 - .saturating_add(Weight::from_parts(1_253_692, 0).saturating_mul(s.into())) + // Standard Error: 3_771 + .saturating_add(Weight::from_parts(1_217_871, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13)) .saturating_add(T::DbWeight::get().writes(11)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -186,6 +188,8 @@ impl pallet_staking::WeightInfo for WeightInfo { } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::MinValidatorBond` (r:1 w:0) /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::MinCommission` (r:1 w:0) @@ -196,8 +200,6 @@ impl pallet_staking::WeightInfo for WeightInfo { /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:0) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListNodes` (r:1 w:1) /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:1 w:1) @@ -210,33 +212,37 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1301` // Estimated: `4556` - // Minimum execution time: 47_292_000 picoseconds. - Weight::from_parts(48_566_000, 0) + // Minimum execution time: 50_555_000 picoseconds. + Weight::from_parts(52_052_000, 0) .saturating_add(Weight::from_parts(0, 4556)) .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:128 w:128) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// The range of component `k` is `[1, 128]`. fn kick(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1243 + k * (569 ±0)` + // Measured: `1778 + k * (572 ±0)` // Estimated: `4556 + k * (3033 ±0)` - // Minimum execution time: 28_840_000 picoseconds. - Weight::from_parts(27_510_817, 0) + // Minimum execution time: 35_037_000 picoseconds. 
+ Weight::from_parts(35_081_878, 0) .saturating_add(Weight::from_parts(0, 4556)) - // Standard Error: 6_603 - .saturating_add(Weight::from_parts(6_268_853, 0).saturating_mul(k.into())) - .saturating_add(T::DbWeight::get().reads(1)) + // Standard Error: 5_473 + .saturating_add(Weight::from_parts(6_667_924, 0).saturating_mul(k.into())) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) .saturating_add(Weight::from_parts(0, 3033).saturating_mul(k.into())) } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::MinNominatorBond` (r:1 w:0) /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:1) @@ -247,8 +253,6 @@ impl pallet_staking::WeightInfo for WeightInfo { /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListNodes` (r:2 w:2) /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:1 w:1) @@ -262,11 +266,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1797 + n * (102 ±0)` // Estimated: `6248 + n * (2520 ±0)` - // Minimum execution time: 57_537_000 picoseconds. - Weight::from_parts(55_854_233, 0) + // Minimum execution time: 62_098_000 picoseconds. + Weight::from_parts(60_154_061, 0) .saturating_add(Weight::from_parts(0, 6248)) - // Standard Error: 14_427 - .saturating_add(Weight::from_parts(3_844_957, 0).saturating_mul(n.into())) + // Standard Error: 19_257 + .saturating_add(Weight::from_parts(3_839_855, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(6)) @@ -274,6 +278,8 @@ impl pallet_staking::WeightInfo for WeightInfo { } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Validators` (r:1 w:0) /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:1) @@ -288,12 +294,12 @@ impl pallet_staking::WeightInfo for WeightInfo { /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill() -> Weight { // Proof Size summary in bytes: - // Measured: `1581` + // Measured: `1747` // Estimated: `6248` - // Minimum execution time: 49_997_000 picoseconds. 
- Weight::from_parts(51_266_000, 0) + // Minimum execution time: 54_993_000 picoseconds. + Weight::from_parts(56_698_000, 0) .saturating_add(Weight::from_parts(0, 6248)) - .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(6)) } /// Storage: `Staking::Ledger` (r:1 w:0) @@ -306,40 +312,40 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `865` // Estimated: `4556` - // Minimum execution time: 15_342_000 picoseconds. - Weight::from_parts(15_970_000, 0) + // Minimum execution time: 18_100_000 picoseconds. + Weight::from_parts(18_547_000, 0) .saturating_add(Weight::from_parts(0, 4556)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Staking::Payee` (r:1 w:1) - /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:1 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn update_payee() -> Weight { // Proof Size summary in bytes: // Measured: `932` // Estimated: `4556` - // Minimum execution time: 20_719_000 picoseconds. - Weight::from_parts(21_373_000, 0) + // Minimum execution time: 23_428_000 picoseconds. + Weight::from_parts(24_080_000, 0) .saturating_add(Weight::from_parts(0, 4556)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:1 w:2) + /// Storage: `Staking::Ledger` (r:2 w:2) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) fn set_controller() -> Weight { // Proof Size summary in bytes: // Measured: `865` - // Estimated: `4556` - // Minimum execution time: 18_237_000 picoseconds. - Weight::from_parts(18_896_000, 0) - .saturating_add(Weight::from_parts(0, 4556)) - .saturating_add(T::DbWeight::get().reads(2)) + // Estimated: `8122` + // Minimum execution time: 21_159_000 picoseconds. + Weight::from_parts(21_706_000, 0) + .saturating_add(Weight::from_parts(0, 8122)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Staking::ValidatorCount` (r:0 w:1) @@ -348,8 +354,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_946_000 picoseconds. - Weight::from_parts(2_131_000, 0) + // Minimum execution time: 1_910_000 picoseconds. + Weight::from_parts(2_003_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -359,8 +365,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_840_000 picoseconds. - Weight::from_parts(7_208_000, 0) + // Minimum execution time: 7_076_000 picoseconds. 
+ Weight::from_parts(7_349_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -370,8 +376,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_812_000 picoseconds. - Weight::from_parts(7_254_000, 0) + // Minimum execution time: 7_067_000 picoseconds. + Weight::from_parts(7_389_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -381,8 +387,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_787_000 picoseconds. - Weight::from_parts(7_206_000, 0) + // Minimum execution time: 7_148_000 picoseconds. + Weight::from_parts(7_446_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -393,32 +399,32 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_045_000 picoseconds. - Weight::from_parts(2_281_841, 0) + // Minimum execution time: 2_025_000 picoseconds. + Weight::from_parts(2_229_953, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 70 - .saturating_add(Weight::from_parts(11_592, 0).saturating_mul(v.into())) + // Standard Error: 67 + .saturating_add(Weight::from_parts(11_785, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `Staking::Ledger` (r:751 w:1502) + /// Storage: `Staking::Ledger` (r:1502 w:1502) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:751 w:751) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Payee` (r:751 w:0) /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:0 w:751) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// The range of component `i` is `[0, 751]`. fn deprecate_controller_batch(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `668 + i * (148 ±0)` - // Estimated: `990 + i * (3566 ±0)` - // Minimum execution time: 1_657_000 picoseconds. - Weight::from_parts(1_702_000, 0) + // Measured: `680 + i * (227 ±0)` + // Estimated: `990 + i * (7132 ±0)` + // Minimum execution time: 4_321_000 picoseconds. 
+ Weight::from_parts(4_407_000, 0) .saturating_add(Weight::from_parts(0, 990)) - // Standard Error: 20_041 - .saturating_add(Weight::from_parts(13_165_254, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(i.into()))) + // Standard Error: 37_239 + .saturating_add(Weight::from_parts(21_300_598, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(i.into()))) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(i.into()))) - .saturating_add(Weight::from_parts(0, 3566).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(0, 7132).saturating_mul(i.into())) } /// Storage: `Staking::SlashingSpans` (r:1 w:1) /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -453,11 +459,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `2127 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 78_774_000 picoseconds. - Weight::from_parts(85_770_713, 0) + // Minimum execution time: 78_908_000 picoseconds. + Weight::from_parts(84_886_373, 0) .saturating_add(Weight::from_parts(0, 6248)) - // Standard Error: 2_815 - .saturating_add(Weight::from_parts(1_244_494, 0).saturating_mul(s.into())) + // Standard Error: 3_376 + .saturating_add(Weight::from_parts(1_217_850, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13)) .saturating_add(T::DbWeight::get().writes(12)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -470,11 +476,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `66639` // Estimated: `70104` - // Minimum execution time: 129_905_000 picoseconds. - Weight::from_parts(932_195_554, 0) + // Minimum execution time: 136_389_000 picoseconds. + Weight::from_parts(1_207_241_524, 0) .saturating_add(Weight::from_parts(0, 70104)) - // Standard Error: 57_492 - .saturating_add(Weight::from_parts(4_826_754, 0).saturating_mul(s.into())) + // Standard Error: 77_138 + .saturating_add(Weight::from_parts(6_443_948, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -511,11 +517,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `8249 + n * (396 ±0)` // Estimated: `10779 + n * (3774 ±0)` - // Minimum execution time: 127_094_000 picoseconds. - Weight::from_parts(160_088_053, 0) + // Minimum execution time: 130_222_000 picoseconds. + Weight::from_parts(167_236_150, 0) .saturating_add(Weight::from_parts(0, 10779)) - // Standard Error: 32_978 - .saturating_add(Weight::from_parts(39_845_710, 0).saturating_mul(n.into())) + // Standard Error: 34_051 + .saturating_add(Weight::from_parts(39_899_917, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(14)) .saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(4)) @@ -539,11 +545,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1922 + l * (5 ±0)` // Estimated: `8877` - // Minimum execution time: 75_672_000 picoseconds. - Weight::from_parts(78_708_335, 0) + // Minimum execution time: 79_136_000 picoseconds. 
+ Weight::from_parts(82_129_497, 0) .saturating_add(Weight::from_parts(0, 8877)) - // Standard Error: 3_387 - .saturating_add(Weight::from_parts(37_084, 0).saturating_mul(l.into())) + // Standard Error: 3_867 + .saturating_add(Weight::from_parts(75_156, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(7)) } @@ -578,11 +584,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `2127 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 87_991_000 picoseconds. - Weight::from_parts(90_272_005, 0) + // Minimum execution time: 89_375_000 picoseconds. + Weight::from_parts(91_224_907, 0) .saturating_add(Weight::from_parts(0, 6248)) - // Standard Error: 2_815 - .saturating_add(Weight::from_parts(1_232_322, 0).saturating_mul(s.into())) + // Standard Error: 3_424 + .saturating_add(Weight::from_parts(1_219_542, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(11)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -627,14 +633,14 @@ impl pallet_staking::WeightInfo for WeightInfo { fn new_era(v: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + n * (716 ±0) + v * (3594 ±0)` - // Estimated: `456136 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 528_862_000 picoseconds. - Weight::from_parts(534_620_000, 0) + // Estimated: `456136 + n * (3566 ±4) + v * (3566 ±0)` + // Minimum execution time: 520_905_000 picoseconds. + Weight::from_parts(523_771_000, 0) .saturating_add(Weight::from_parts(0, 456136)) - // Standard Error: 2_005_553 - .saturating_add(Weight::from_parts(65_586_008, 0).saturating_mul(v.into())) - // Standard Error: 199_842 - .saturating_add(Weight::from_parts(18_155_389, 0).saturating_mul(n.into())) + // Standard Error: 2_142_714 + .saturating_add(Weight::from_parts(68_631_588, 0).saturating_mul(v.into())) + // Standard Error: 213_509 + .saturating_add(Weight::from_parts(19_343_025, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(184)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -665,13 +671,13 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `3108 + n * (907 ±0) + v * (391 ±0)` // Estimated: `456136 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 33_532_110_000 picoseconds. - Weight::from_parts(33_926_321_000, 0) + // Minimum execution time: 36_848_619_000 picoseconds. 
+ Weight::from_parts(37_362_442_000, 0) .saturating_add(Weight::from_parts(0, 456136)) - // Standard Error: 374_134 - .saturating_add(Weight::from_parts(4_627_629, 0).saturating_mul(v.into())) - // Standard Error: 374_134 - .saturating_add(Weight::from_parts(4_068_168, 0).saturating_mul(n.into())) + // Standard Error: 415_031 + .saturating_add(Weight::from_parts(5_204_987, 0).saturating_mul(v.into())) + // Standard Error: 415_031 + .saturating_add(Weight::from_parts(4_132_636, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(179)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -688,11 +694,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `946 + v * (50 ±0)` // Estimated: `3510 + v * (2520 ±0)` - // Minimum execution time: 2_395_956_000 picoseconds. - Weight::from_parts(88_416_870, 0) + // Minimum execution time: 2_512_817_000 picoseconds. + Weight::from_parts(119_401_374, 0) .saturating_add(Weight::from_parts(0, 3510)) - // Standard Error: 8_731 - .saturating_add(Weight::from_parts(4_750_956, 0).saturating_mul(v.into())) + // Standard Error: 8_463 + .saturating_add(Weight::from_parts(4_860_364, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into())) @@ -715,8 +721,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_761_000 picoseconds. - Weight::from_parts(4_013_000, 0) + // Minimum execution time: 3_686_000 picoseconds. + Weight::from_parts(3_881_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(7)) } @@ -738,8 +744,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_325_000 picoseconds. - Weight::from_parts(3_519_000, 0) + // Minimum execution time: 3_143_000 picoseconds. + Weight::from_parts(3_424_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(7)) } @@ -769,8 +775,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1870` // Estimated: `6248` - // Minimum execution time: 63_583_000 picoseconds. - Weight::from_parts(65_917_000, 0) + // Minimum execution time: 66_946_000 picoseconds. + Weight::from_parts(69_382_000, 0) .saturating_add(Weight::from_parts(0, 6248)) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(6)) @@ -783,8 +789,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `658` // Estimated: `3510` - // Minimum execution time: 10_975_000 picoseconds. - Weight::from_parts(11_328_000, 0) + // Minimum execution time: 11_278_000 picoseconds. + Weight::from_parts(11_603_000, 0) .saturating_add(Weight::from_parts(0, 3510)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -795,9 +801,29 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_954_000 picoseconds. - Weight::from_parts(2_081_000, 0) + // Minimum execution time: 1_963_000 picoseconds. 
+		Weight::from_parts(2_077_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
+	/// Storage: `Balances::Locks` (r:1 w:1)
+	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Bonded` (r:1 w:1)
+	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Ledger` (r:1 w:1)
+	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Freezes` (r:1 w:0)
+	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	fn restore_ledger() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `1014`
+		//  Estimated: `4764`
+		// Minimum execution time: 40_258_000 picoseconds.
+		Weight::from_parts(41_210_000, 0)
+			.saturating_add(Weight::from_parts(0, 4764))
+			.saturating_add(T::DbWeight::get().reads(5))
+			.saturating_add(T::DbWeight::get().writes(4))
+	}
 }
diff --git a/prdoc/pr_3706.prdoc b/prdoc/pr_3706.prdoc
new file mode 100644
index 00000000000..edeb08241be
--- /dev/null
+++ b/prdoc/pr_3706.prdoc
@@ -0,0 +1,20 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Extrinsic to restore corrupted staking ledgers
+
+doc:
+  - audience: Runtime User
+    description: |
+      This PR adds a new extrinsic `Call::restore_ledger`, gated by the `StakingAdmin` origin, that restores a corrupted staking ledger. This extrinsic will be used to recover ledgers that were affected by the issue discussed in https://github.com/paritytech/polkadot-sdk/issues/3245.
+      The extrinsic re-writes the storage items associated with the stash account provided as input. The data used to reset the ledger can be either i) fetched on-chain or ii) partially/totally set by the input parameters of the call.
+
+      Changes introduced:
+      - Adds the `Call::restore_ledger` extrinsic to recover a corrupted ledger;
+      - Adds trait `frame_support::traits::currency::InspectLockableCurrency` to allow external pallets to read the current locks given an account and lock ID;
+      - Implements `InspectLockableCurrency` in pallet-balances;
+      - Adds staking locks try-runtime checks (https://github.com/paritytech/polkadot-sdk/issues/3751)
+
+crates:
+  - name: pallet-staking
+  - name: pallet-balances
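For illustration, a minimal sketch of dispatching the new call. The literal account id, the amount, and the use of the root origin are assumptions for the example (any origin satisfying `T::AdminOrigin` works), not part of this patch:

    // Hypothetical dispatch of the recovery call from a root/`StakingAdmin` origin.
    use frame_support::assert_ok;

    assert_ok!(Staking::restore_ledger(
        RuntimeOrigin::root(), // must pass `T::AdminOrigin::ensure_origin`
        333,                   // stash whose ledger is being restored
        None,                  // maybe_controller: default back to the stash itself
        Some(1_000),           // maybe_total: overwrite ledger.total and the staking lock
        None,                  // maybe_unlocking: reset the unlocking chunks to empty
    ));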
diff --git a/substrate/frame/balances/src/impl_currency.rs b/substrate/frame/balances/src/impl_currency.rs
index 1ac882ade70..d5fe9934e23 100644
--- a/substrate/frame/balances/src/impl_currency.rs
+++ b/substrate/frame/balances/src/impl_currency.rs
@@ -28,8 +28,8 @@ use frame_support::{
 		tokens::{fungible, BalanceStatus as Status, Fortitude::Polite, Precision::BestEffort},
 		Currency, DefensiveSaturating, ExistenceRequirement,
 		ExistenceRequirement::AllowDeath,
-		Get, Imbalance, LockIdentifier, LockableCurrency, NamedReservableCurrency,
-		ReservableCurrency, SignedImbalance, TryDrop, WithdrawReasons,
+		Get, Imbalance, InspectLockableCurrency, LockIdentifier, LockableCurrency,
+		NamedReservableCurrency, ReservableCurrency, SignedImbalance, TryDrop, WithdrawReasons,
 	},
 };
 use frame_system::pallet_prelude::BlockNumberFor;
@@ -918,3 +918,12 @@ where
 		Self::update_locks(who, &locks[..]);
 	}
 }
+
+impl<T: Config<I>, I: 'static> InspectLockableCurrency<T::AccountId> for Pallet<T, I> {
+	fn balance_locked(id: LockIdentifier, who: &T::AccountId) -> Self::Balance {
+		Self::locks(who)
+			.into_iter()
+			.filter(|l| l.id == id)
+			.fold(Zero::zero(), |acc, l| acc + l.amount)
+	}
+}
diff --git a/substrate/frame/balances/src/tests/currency_tests.rs b/substrate/frame/balances/src/tests/currency_tests.rs
index bd4ff762c74..450b1a84aa8 100644
--- a/substrate/frame/balances/src/tests/currency_tests.rs
+++ b/substrate/frame/balances/src/tests/currency_tests.rs
@@ -24,8 +24,8 @@ use frame_support::{
 		BalanceStatus::{Free, Reserved},
 		Currency,
 		ExistenceRequirement::{self, AllowDeath, KeepAlive},
-		Hooks, LockIdentifier, LockableCurrency, NamedReservableCurrency, ReservableCurrency,
-		WithdrawReasons,
+		Hooks, InspectLockableCurrency, LockIdentifier, LockableCurrency, NamedReservableCurrency,
+		ReservableCurrency, WithdrawReasons,
 	},
 	StorageNoopGuard,
 };
@@ -88,6 +88,24 @@ fn basic_locking_should_work() {
 	});
 }
 
+#[test]
+fn inspect_lock_should_work() {
+	ExtBuilder::default()
+		.existential_deposit(1)
+		.monied(true)
+		.build_and_execute_with(|| {
+			Balances::set_lock(ID_1, &1, 10, WithdrawReasons::all());
+			Balances::set_lock(ID_2, &1, 10, WithdrawReasons::all());
+			Balances::set_lock(ID_1, &2, 20, WithdrawReasons::all());
+
+			assert_eq!(<Balances as InspectLockableCurrency<u64>>::balance_locked(ID_1, &1), 10);
+			assert_eq!(<Balances as InspectLockableCurrency<u64>>::balance_locked(ID_2, &1), 10);
+			assert_eq!(<Balances as InspectLockableCurrency<u64>>::balance_locked(ID_1, &2), 20);
+			assert_eq!(<Balances as InspectLockableCurrency<u64>>::balance_locked(ID_2, &2), 0);
+			assert_eq!(<Balances as InspectLockableCurrency<u64>>::balance_locked(ID_1, &3), 0);
+		})
+}
+
 #[test]
 fn account_should_be_reaped() {
 	ExtBuilder::default()
diff --git a/substrate/frame/staking/Cargo.toml b/substrate/frame/staking/Cargo.toml
index d2a46146931..15c4bf9e290 100644
--- a/substrate/frame/staking/Cargo.toml
+++ b/substrate/frame/staking/Cargo.toml
@@ -40,10 +40,10 @@ frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 rand_chacha = { version = "0.2", default-features = false, optional = true }
 
 [dev-dependencies]
+pallet-balances = { path = "../balances" }
 sp-tracing = { path = "../../primitives/tracing" }
 sp-core = { path = "../../primitives/core" }
 sp-npos-elections = { path = "../../primitives/npos-elections" }
-pallet-balances = { path = "../balances" }
 pallet-timestamp = { path = "../timestamp" }
 pallet-staking-reward-curve = { path = "reward-curve" }
 pallet-bags-list = { path = "../bags-list" }
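The `balance_locked` implementation above folds over all of an account's locks and keeps only those matching the given `LockIdentifier`. A dependency-free sketch of the same semantics (the local `Lock` struct and literal ids are illustrative assumptions, not the pallet's types):

    // Conceptual model of `InspectLockableCurrency::balance_locked`: the locked
    // balance under an id is the sum of all lock amounts carrying that id.
    #[derive(Clone)]
    struct Lock {
        id: [u8; 8], // `LockIdentifier` is an 8-byte array
        amount: u64,
    }

    fn balance_locked(id: [u8; 8], locks: &[Lock]) -> u64 {
        locks.iter().filter(|l| l.id == id).map(|l| l.amount).sum()
    }

    fn main() {
        let locks = vec![
            Lock { id: *b"staking ", amount: 10 },
            Lock { id: *b"vesting ", amount: 5 },
        ];
        assert_eq!(balance_locked(*b"staking ", &locks), 10);
        assert_eq!(balance_locked(*b"unknown ", &locks), 0);
    }

In pallet-balances a given id appears at most once per account (`set_lock` replaces an existing lock with the same id), so the fold effectively returns that single lock's amount, or zero.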
diff --git a/substrate/frame/staking/src/benchmarking.rs b/substrate/frame/staking/src/benchmarking.rs
index a8306087397..0b67cd46039 100644
--- a/substrate/frame/staking/src/benchmarking.rs
+++ b/substrate/frame/staking/src/benchmarking.rs
@@ -953,6 +953,15 @@ benchmarks! {
 		assert_eq!(MinCommission::<T>::get(), Perbill::from_percent(100));
 	}
 
+	restore_ledger {
+		let (stash, controller) = create_stash_controller::<T>(0, 100, RewardDestination::Staked)?;
+		// corrupt the ledger.
+		Ledger::<T>::remove(controller);
+	}: _(RawOrigin::Root, stash.clone(), None, None, None)
+	verify {
+		assert_eq!(Staking::<T>::inspect_bond_state(&stash), Ok(LedgerIntegrityState::Ok));
+	}
+
 	impl_benchmark_test_suite!(
 		Staking,
 		crate::mock::ExtBuilder::default().has_stakers(true),
diff --git a/substrate/frame/staking/src/lib.rs b/substrate/frame/staking/src/lib.rs
index 5a92b6c855f..f5b7e3eca3d 100644
--- a/substrate/frame/staking/src/lib.rs
+++ b/substrate/frame/staking/src/lib.rs
@@ -491,6 +491,20 @@ pub struct StakingLedger<T: Config> {
 	controller: Option<T::AccountId>,
 }
 
+/// State of a ledger with regard to its data and metadata integrity.
+#[derive(PartialEq, Debug)]
+enum LedgerIntegrityState {
+	/// Ledger, bond and corresponding staking lock are OK.
+	Ok,
+	/// Ledger and/or bond is corrupted. This means that the bond has a ledger with a different
+	/// stash than the bonded stash.
+	Corrupted,
+	/// Ledger was corrupted and has been killed.
+	CorruptedKilled,
+	/// Ledger and bond are OK, however the ledger's stash lock is out of sync.
+	LockCorrupted,
+}
+
 impl<T: Config> StakingLedger<T> {
 	/// Remove entries from `unlocking` that are sufficiently old and reduce the
 	/// total by the sum of their balances.
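The enum above is consumed by `Pallet::inspect_bond_state` (added in pallet/impls.rs further down). A minimal sketch of that decision tree, with the storage reads collapsed into plain parameters (the free function and its argument shapes are illustrative assumptions):

    // `lock`: amount held under the staking lock on the stash account.
    // `ledger`: (stash, total) of the ledger keyed by the bonded controller, if any.
    // A missing bond is reported upstream as NotStash/BadState and is not modelled here.
    #[derive(Debug, PartialEq)]
    enum LedgerIntegrityState { Ok, Corrupted, CorruptedKilled, LockCorrupted }

    fn integrity(stash: u64, lock: u64, ledger: Option<(u64, u64)>) -> LedgerIntegrityState {
        match ledger {
            // the bonded controller no longer keys a ledger: corrupted and killed.
            None => LedgerIntegrityState::CorruptedKilled,
            // the controller keys a ledger belonging to a different stash.
            Some((s, _)) if s != stash => LedgerIntegrityState::Corrupted,
            // right ledger, but the staking lock drifted from ledger.total.
            Some((_, total)) if total != lock => LedgerIntegrityState::LockCorrupted,
            Some(_) => LedgerIntegrityState::Ok,
        }
    }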
diff --git a/substrate/frame/staking/src/mock.rs b/substrate/frame/staking/src/mock.rs
index 6c2ea225ff1..6db462c1a70 100644
--- a/substrate/frame/staking/src/mock.rs
+++ b/substrate/frame/staking/src/mock.rs
@@ -25,8 +25,8 @@ use frame_election_provider_support::{
 use frame_support::{
 	assert_ok, derive_impl, ord_parameter_types, parameter_types,
 	traits::{
-		ConstU64, Currency, EitherOfDiverse, FindAuthor, Get, Hooks, Imbalance, OnUnbalanced,
-		OneSessionHandler,
+		ConstU64, Currency, EitherOfDiverse, FindAuthor, Get, Hooks, Imbalance, LockableCurrency,
+		OnUnbalanced, OneSessionHandler, WithdrawReasons,
 	},
 	weights::constants::RocksDbWeight,
 };
@@ -786,55 +786,86 @@ pub(crate) fn bond_controller_stash(controller: AccountId, stash: AccountId) ->
 	Ok(())
 }
 
+// simulates `set_controller` without corrupted ledger checks for testing purposes.
+pub(crate) fn set_controller_no_checks(stash: &AccountId) {
+	let controller = Bonded::<Test>::get(stash).expect("testing stash should be bonded");
+	let ledger = Ledger::<Test>::get(&controller).expect("testing ledger should exist");
+
+	Ledger::<Test>::remove(&controller);
+	Ledger::<Test>::insert(stash, ledger);
+	Bonded::<Test>::insert(stash, stash);
+}
+
+// simulates `bond_extra` without corrupted ledger checks for testing purposes.
+pub(crate) fn bond_extra_no_checks(stash: &AccountId, amount: Balance) {
+	let controller = Bonded::<Test>::get(stash).expect("bond must exist to bond_extra");
+	let mut ledger = Ledger::<Test>::get(&controller).expect("ledger must exist to bond_extra");
+
+	let new_total = ledger.total + amount;
+	Balances::set_lock(crate::STAKING_ID, stash, new_total, WithdrawReasons::all());
+	ledger.total = new_total;
+	ledger.active = new_total;
+	Ledger::<Test>::insert(controller, ledger);
+}
+
 pub(crate) fn setup_double_bonded_ledgers() {
-	assert_ok!(Staking::bond(RuntimeOrigin::signed(1), 10, RewardDestination::Staked));
-	assert_ok!(Staking::bond(RuntimeOrigin::signed(2), 20, RewardDestination::Staked));
-	assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 20, RewardDestination::Staked));
+	let init_ledgers = Ledger::<Test>::iter().count();
+
+	let _ = Balances::make_free_balance_be(&333, 2000);
+	let _ = Balances::make_free_balance_be(&444, 2000);
+	let _ = Balances::make_free_balance_be(&555, 2000);
+	let _ = Balances::make_free_balance_be(&777, 2000);
+
+	assert_ok!(Staking::bond(RuntimeOrigin::signed(333), 10, RewardDestination::Staked));
+	assert_ok!(Staking::bond(RuntimeOrigin::signed(444), 20, RewardDestination::Staked));
+	assert_ok!(Staking::bond(RuntimeOrigin::signed(555), 20, RewardDestination::Staked));
 	// not relevant to the test case, but ensures try-runtime checks pass.
-	[1, 2, 3]
+	[333, 444, 555]
 		.iter()
 		.for_each(|s| Payee::<Test>::insert(s, RewardDestination::Staked));
 
 	// we want to test the case where a controller can also be a stash of another ledger.
 	// for that, we change the controller/stash bonding so that:
-	// * 2 becomes controller of 1.
-	// * 3 becomes controller of 2.
-	// * 4 becomes controller of 3.
-	let ledger_1 = Ledger::<Test>::get(1).unwrap();
-	let ledger_2 = Ledger::<Test>::get(2).unwrap();
-	let ledger_3 = Ledger::<Test>::get(3).unwrap();
-
-	// 4 becomes controller of 3.
-	Bonded::<Test>::mutate(3, |controller| *controller = Some(4));
-	Ledger::<Test>::insert(4, ledger_3);
-
-	// 3 becomes controller of 2.
-	Bonded::<Test>::mutate(2, |controller| *controller = Some(3));
-	Ledger::<Test>::insert(3, ledger_2);
-
-	// 2 becomes controller of 1
-	Bonded::<Test>::mutate(1, |controller| *controller = Some(2));
-	Ledger::<Test>::insert(2, ledger_1);
-	// 1 is not controller anymore.
-	Ledger::<Test>::remove(1);
+	// * 444 becomes controller of 333.
+	// * 555 becomes controller of 444.
+	// * 777 becomes controller of 555.
+	let ledger_333 = Ledger::<Test>::get(333).unwrap();
+	let ledger_444 = Ledger::<Test>::get(444).unwrap();
+	let ledger_555 = Ledger::<Test>::get(555).unwrap();
+
+	// 777 becomes controller of 555.
+	Bonded::<Test>::mutate(555, |controller| *controller = Some(777));
+	Ledger::<Test>::insert(777, ledger_555);
+
+	// 555 becomes controller of 444.
+	Bonded::<Test>::mutate(444, |controller| *controller = Some(555));
+	Ledger::<Test>::insert(555, ledger_444);
+
+	// 444 becomes controller of 333.
+	Bonded::<Test>::mutate(333, |controller| *controller = Some(444));
+	Ledger::<Test>::insert(444, ledger_333);
+
+	// 333 is not controller anymore.
+	Ledger::<Test>::remove(333);
 
 	// checks. now we have:
-	// * 3 ledgers
-	assert_eq!(Ledger::<Test>::iter().count(), 3);
-	// * stash 1 has controller 2.
-	assert_eq!(Bonded::<Test>::get(1), Some(2));
-	assert_eq!(StakingLedger::<Test>::paired_account(StakingAccount::Stash(1)), Some(2));
-	assert_eq!(Ledger::<Test>::get(2).unwrap().stash, 1);
-
-	// * stash 2 has controller 3.
-	assert_eq!(Bonded::<Test>::get(2), Some(3));
-	assert_eq!(StakingLedger::<Test>::paired_account(StakingAccount::Stash(2)), Some(3));
-	assert_eq!(Ledger::<Test>::get(3).unwrap().stash, 2);
-
-	// * stash 3 has controller 4.
-	assert_eq!(Bonded::<Test>::get(3), Some(4));
-	assert_eq!(StakingLedger::<Test>::paired_account(StakingAccount::Stash(3)), Some(4));
-	assert_eq!(Ledger::<Test>::get(4).unwrap().stash, 3);
+	// * +3 ledgers
+	assert_eq!(Ledger::<Test>::iter().count(), 3 + init_ledgers);
+
+	// * stash 333 has controller 444.
+	assert_eq!(Bonded::<Test>::get(333), Some(444));
+	assert_eq!(StakingLedger::<Test>::paired_account(StakingAccount::Stash(333)), Some(444));
+	assert_eq!(Ledger::<Test>::get(444).unwrap().stash, 333);
+
+	// * stash 444 has controller 555.
+	assert_eq!(Bonded::<Test>::get(444), Some(555));
+	assert_eq!(StakingLedger::<Test>::paired_account(StakingAccount::Stash(444)), Some(555));
+	assert_eq!(Ledger::<Test>::get(555).unwrap().stash, 444);
+
+	// * stash 555 has controller 777.
+	assert_eq!(Bonded::<Test>::get(555), Some(777));
+	assert_eq!(StakingLedger::<Test>::paired_account(StakingAccount::Stash(555)), Some(777));
+	assert_eq!(Ledger::<Test>::get(777).unwrap().stash, 555);
 }
 
 #[macro_export]
diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs
index 407b301fad2..2f43e4847e4 100644
--- a/substrate/frame/staking/src/pallet/impls.rs
+++ b/substrate/frame/staking/src/pallet/impls.rs
@@ -27,8 +27,8 @@ use frame_support::{
 	dispatch::WithPostDispatchInfo,
 	pallet_prelude::*,
 	traits::{
-		Currency, Defensive, DefensiveSaturating, EstimateNextNewSession, Get, Imbalance, Len,
-		OnUnbalanced, TryCollect, UnixTime,
+		Currency, Defensive, DefensiveSaturating, EstimateNextNewSession, Get, Imbalance,
+		InspectLockableCurrency, Len, OnUnbalanced, TryCollect, UnixTime,
 	},
 	weights::Weight,
 };
@@ -50,8 +50,8 @@ use sp_std::prelude::*;
 use crate::{
 	election_size_tracker::StaticTracker, log, slashing, weights::WeightInfo, ActiveEraInfo,
 	BalanceOf, EraInfo, EraPayout, Exposure, ExposureOf, Forcing, IndividualExposure,
-	MaxNominationsOf, MaxWinnersOf, Nominations, NominationsQuota, PositiveImbalanceOf,
-	RewardDestination, SessionInterface, StakingLedger, ValidatorPrefs,
+	LedgerIntegrityState, MaxNominationsOf, MaxWinnersOf, Nominations, NominationsQuota,
+	PositiveImbalanceOf, RewardDestination, SessionInterface, StakingLedger, ValidatorPrefs,
 };
 
 use super::pallet::*;
@@ -84,6 +84,38 @@ impl<T: Config> Pallet<T> {
 		StakingLedger::<T>::paired_account(Stash(stash.clone()))
 	}
 
+	/// Inspects and returns the corruption state of a ledger and bond, if any.
+	///
+	/// Note: all operations in this method access directly the `Bonded` and `Ledger` storage maps
+	/// instead of using the [`StakingLedger`] API since the bond and/or ledger may be corrupted.
+	pub(crate) fn inspect_bond_state(
+		stash: &T::AccountId,
+	) -> Result<LedgerIntegrityState, Error<T>> {
+		let lock = T::Currency::balance_locked(crate::STAKING_ID, &stash);
+
+		let controller = <Bonded<T>>::get(stash).ok_or_else(|| {
+			if lock == Zero::zero() {
+				Error::<T>::NotStash
+			} else {
+				Error::<T>::BadState
+			}
+		})?;
+
+		match Ledger::<T>::get(controller) {
+			Some(ledger) =>
+				if ledger.stash != *stash {
+					Ok(LedgerIntegrityState::Corrupted)
+				} else {
+					if lock != ledger.total {
+						Ok(LedgerIntegrityState::LockCorrupted)
+					} else {
+						Ok(LedgerIntegrityState::Ok)
+					}
+				},
+			None => Ok(LedgerIntegrityState::CorruptedKilled),
+		}
+	}
+
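The try-state changes below fold this check over every bonded pair. A condensed sketch of that sweep, written against the test runtime (`Test`, `Staking` and the storage names are the mock's; the helper function itself is an assumption):

    // For every (stash, controller) pair in `Bonded`, a healthy chain must
    // report `LedgerIntegrityState::Ok` for the stash.
    fn all_bonds_healthy() -> bool {
        Bonded::<Test>::iter().all(|(stash, _ctrl)| {
            Staking::inspect_bond_state(&stash) == Ok(LedgerIntegrityState::Ok)
        })
    }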
 	/// The total balance that can be slashed from a stash account as of right now.
 	pub fn slashable_balance_of(stash: &T::AccountId) -> BalanceOf<T> {
 		// Weight note: consider making the stake accessible through stash.
@@ -1837,12 +1869,12 @@
 			"VoterList contains non-staker"
 		);
 
+		Self::check_ledgers()?;
 		Self::check_bonded_consistency()?;
 		Self::check_payees()?;
 		Self::check_nominators()?;
 		Self::check_exposures()?;
 		Self::check_paged_exposures()?;
-		Self::check_ledgers()?;
 		Self::check_count()
 	}
 
@@ -1851,6 +1883,7 @@
 	/// * A bonded (stash, controller) pair should have only one associated ledger. I.e. if the
 	///   ledger is bonded by stash, the controller account must not bond a different ledger.
 	/// * A bonded (stash, controller) pair must have an associated ledger.
+	///
 	/// NOTE: these checks result in warnings only. Once
 	/// <https://github.com/paritytech/polkadot-sdk/issues/3245> is resolved, turn warns into check
 	/// failures.
@@ -1945,19 +1978,18 @@
 	}
 
 	/// Invariants:
-	/// * `ledger.controller` is not stored in the storage (but populated at retrieval).
 	/// * Stake consistency: ledger.total == ledger.active + sum(ledger.unlocking).
-	/// * The controller keying the ledger and the ledger stash matches the state of the `Bonded`
-	///   storage.
+	/// * The ledger's controller and stash matches the associated `Bonded` tuple.
+	/// * Staking locked funds for every bonded stash should be the same as its ledger's total.
+	/// * Staking ledger and bond are not corrupted.
 	fn check_ledgers() -> Result<(), TryRuntimeError> {
 		Bonded::<T>::iter()
 			.map(|(stash, ctrl)| {
-				// `ledger.controller` is never stored in raw storage.
-				let raw = Ledger::<T>::get(stash).unwrap_or_else(|| {
-					Ledger::<T>::get(ctrl.clone())
-						.expect("try_check: bonded stash/ctrl does not have an associated ledger")
-				});
-				ensure!(raw.controller.is_none(), "raw storage controller should be None");
+				// ensure locks consistency.
+				ensure!(
+					Self::inspect_bond_state(&stash) == Ok(LedgerIntegrityState::Ok),
+					"bond, ledger and/or staking lock inconsistent for a bonded stash."
+				);
 
 				// ensure ledger consistency.
 				Self::ensure_ledger_consistent(ctrl)
diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs
index 6afbf12032d..2e5b3aa7b87 100644
--- a/substrate/frame/staking/src/pallet/mod.rs
+++ b/substrate/frame/staking/src/pallet/mod.rs
@@ -25,7 +25,7 @@ use frame_support::{
 	pallet_prelude::*,
 	traits::{
 		Currency, Defensive, DefensiveSaturating, EnsureOrigin, EstimateNextNewSession, Get,
-		LockableCurrency, OnUnbalanced, UnixTime,
+		InspectLockableCurrency, LockableCurrency, OnUnbalanced, UnixTime, WithdrawReasons,
 	},
 	weights::Weight,
 	BoundedVec,
@@ -48,9 +48,9 @@ pub use impls::*;
 
 use crate::{
 	slashing, weights::WeightInfo, AccountIdLookupOf, ActiveEraInfo, BalanceOf, EraPayout,
-	EraRewardPoints, Exposure, ExposurePage, Forcing, MaxNominationsOf, NegativeImbalanceOf,
-	Nominations, NominationsQuota, PositiveImbalanceOf, RewardDestination, SessionInterface,
-	StakingLedger, UnappliedSlash, UnlockChunk, ValidatorPrefs,
+	EraRewardPoints, Exposure, ExposurePage, Forcing, LedgerIntegrityState, MaxNominationsOf,
+	NegativeImbalanceOf, Nominations, NominationsQuota, PositiveImbalanceOf, RewardDestination,
+	SessionInterface, StakingLedger, UnappliedSlash, UnlockChunk, ValidatorPrefs,
 };
 
 // The speculative number of spans are used as an input of the weight annotation of
@@ -88,10 +88,10 @@ pub mod pallet {
 	pub trait Config: frame_system::Config {
 		/// The staking balance.
 		type Currency: LockableCurrency<
-			Self::AccountId,
-			Moment = BlockNumberFor<Self>,
-			Balance = Self::CurrencyBalance,
-		>;
+				Self::AccountId,
+				Moment = BlockNumberFor<Self>,
+				Balance = Self::CurrencyBalance,
+			> + InspectLockableCurrency<Self::AccountId>;
 		/// Just the `Currency::Balance` type; we have this item to allow us to constrain it to
 		/// `From<u64>`.
 		type CurrencyBalance: sp_runtime::traits::AtLeast32BitUnsigned
@@ -796,6 +796,7 @@ pub mod pallet {
 	}
 
 	#[pallet::error]
+	#[derive(PartialEq)]
 	pub enum Error<T> {
 		/// Not a controller account.
 		NotController,
@@ -855,6 +856,8 @@ pub mod pallet {
 		BoundNotMet,
 		/// Used when attempting to use deprecated controller account logic.
 		ControllerDeprecated,
+		/// Cannot reset a ledger.
+		CannotRestoreLedger,
 	}
 
 	#[pallet::hooks]
@@ -1980,6 +1983,108 @@ pub mod pallet {
 
 			Ok(Some(T::WeightInfo::deprecate_controller_batch(controllers.len() as u32)).into())
 		}
+
+		/// Restores the state of a ledger which is in an inconsistent state.
+		///
+		/// The requirements to restore a ledger are the following:
+		/// * The stash is bonded; or
+		/// * The stash is not bonded but it has a staking lock left behind; or
+		/// * The stash has an associated ledger whose state is inconsistent; or
+		/// * The ledger is not corrupted *but* its staking lock is out of sync.
+		///
+		/// The `maybe_*` input parameters will overwrite the corresponding data and metadata of
+		/// the ledger associated with the stash. If the input parameters are not set, the ledger
+		/// will be reset to values fetched from on-chain state.
+		#[pallet::call_index(29)]
+		#[pallet::weight(T::WeightInfo::restore_ledger())]
+		pub fn restore_ledger(
+			origin: OriginFor<T>,
+			stash: T::AccountId,
+			maybe_controller: Option<T::AccountId>,
+			maybe_total: Option<BalanceOf<T>>,
+			maybe_unlocking: Option<BoundedVec<UnlockChunk<BalanceOf<T>>, T::MaxUnlockingChunks>>,
+		) -> DispatchResult {
+			T::AdminOrigin::ensure_origin(origin)?;
+
+			let current_lock = T::Currency::balance_locked(crate::STAKING_ID, &stash);
+			let stash_balance = T::Currency::free_balance(&stash);
+
+			let (new_controller, new_total) = match Self::inspect_bond_state(&stash) {
+				Ok(LedgerIntegrityState::Corrupted) => {
+					let new_controller = maybe_controller.unwrap_or(stash.clone());
+
+					let new_total = if let Some(total) = maybe_total {
+						let new_total = total.min(stash_balance);
+						// enforce lock == ledger.amount.
+						T::Currency::set_lock(
+							crate::STAKING_ID,
+							&stash,
+							new_total,
+							WithdrawReasons::all(),
+						);
+						new_total
+					} else {
+						current_lock
+					};
+
+					Ok((new_controller, new_total))
+				},
+				Ok(LedgerIntegrityState::CorruptedKilled) => {
+					if current_lock == Zero::zero() {
+						// this case needs to restore both lock and ledger, so the new total needs
+						// to be given by the caller since there's no way to restore the total
+						// on-chain.
+						ensure!(maybe_total.is_some(), Error::<T>::CannotRestoreLedger);
+						Ok((
+							stash.clone(),
+							maybe_total.expect("total exists as per the check above; qed."),
+						))
+					} else {
+						Ok((stash.clone(), current_lock))
+					}
+				},
+				Ok(LedgerIntegrityState::LockCorrupted) => {
+					// ledger is not corrupted but its locks are out of sync. In this case, we need
+					// to enforce a new ledger.total and staking lock for this stash.
+					let new_total =
+						maybe_total.ok_or(Error::<T>::CannotRestoreLedger)?.min(stash_balance);
+					T::Currency::set_lock(
+						crate::STAKING_ID,
+						&stash,
+						new_total,
+						WithdrawReasons::all(),
+					);
+
+					Ok((stash.clone(), new_total))
+				},
+				Err(Error::<T>::BadState) => {
+					// the stash and ledger do not exist but the lock is lingering.
+					T::Currency::remove_lock(crate::STAKING_ID, &stash);
+					ensure!(
+						Self::inspect_bond_state(&stash) == Err(Error::<T>::NotStash),
+						Error::<T>::BadState
+					);
+
+					return Ok(());
+				},
+				Ok(LedgerIntegrityState::Ok) | Err(_) => Err(Error::<T>::CannotRestoreLedger),
+			}?;
+
+			// re-bond the stash and controller tuple.
+			Bonded::<T>::insert(&stash, &new_controller);
+
+			// restore the ledger state.
+			let mut ledger = StakingLedger::<T>::new(stash.clone(), new_total);
+			ledger.controller = Some(new_controller);
+			ledger.unlocking = maybe_unlocking.unwrap_or_default();
+			ledger.update()?;
+
+			ensure!(
+				Self::inspect_bond_state(&stash) == Ok(LedgerIntegrityState::Ok),
+				Error::<T>::BadState
+			);
+			Ok(())
+		}
 	}
 }
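To summarize the branches above: whether the call can proceed without an explicit `maybe_total` depends on the integrity state, since the on-chain lock is only a trustworthy seed for `ledger.total` in some of them. A small sketch of that rule, reusing `LedgerIntegrityState` from lib.rs above (the free function itself is an illustrative assumption, not part of the pallet):

    // When must the caller pass `maybe_total` to `restore_ledger`?
    fn needs_explicit_total(state: &LedgerIntegrityState, lock_is_zero: bool) -> bool {
        match state {
            // a surviving lock seeds the new total if none is given.
            LedgerIntegrityState::Corrupted => false,
            // ledger is gone; with a zero lock there is nothing on-chain
            // left to recover the total from.
            LedgerIntegrityState::CorruptedKilled => lock_is_zero,
            // the lock itself is out of sync, so it cannot be trusted.
            LedgerIntegrityState::LockCorrupted => true,
            // a healthy ledger is rejected with `CannotRestoreLedger` regardless.
            LedgerIntegrityState::Ok => false,
        }
    }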
diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs
index ef156e19552..a5c9abe2f17 100644
--- a/substrate/frame/staking/src/tests.rs
+++ b/substrate/frame/staking/src/tests.rs
@@ -6933,40 +6933,43 @@ mod ledger {
 			setup_double_bonded_ledgers();
 
 			// Case 1: double bonded but not corrupted:
-			// stash 2 has controller 3:
-			assert_eq!(Bonded::<Test>::get(2), Some(3));
-			assert_eq!(Ledger::<Test>::get(3).unwrap().stash, 2);
+			// stash 444 has controller 555:
+			assert_eq!(Bonded::<Test>::get(444), Some(555));
+			assert_eq!(Ledger::<Test>::get(555).unwrap().stash, 444);
 
-			// stash 2 is also a controller of 1:
-			assert_eq!(Bonded::<Test>::get(1), Some(2));
-			assert_eq!(StakingLedger::<Test>::paired_account(StakingAccount::Stash(1)), Some(2));
-			assert_eq!(Ledger::<Test>::get(2).unwrap().stash, 1);
+			// stash 444 is also a controller of 333:
+			assert_eq!(Bonded::<Test>::get(333), Some(444));
+			assert_eq!(
+				StakingLedger::<Test>::paired_account(StakingAccount::Stash(333)),
+				Some(444)
+			);
+			assert_eq!(Ledger::<Test>::get(444).unwrap().stash, 333);
 
-			// although 2 is double bonded (it is a controller and a stash of different ledgers),
+			// although 444 is double bonded (it is a controller and a stash of different ledgers),
 			// we can safely retrieve the ledger and mutate it since the correct ledger is
 			// returned.
-			let ledger_result = StakingLedger::<Test>::get(StakingAccount::Stash(2));
-			assert_eq!(ledger_result.unwrap().stash, 2); // correct ledger.
+			let ledger_result = StakingLedger::<Test>::get(StakingAccount::Stash(444));
+			assert_eq!(ledger_result.unwrap().stash, 444); // correct ledger.
 
-			let ledger_result = StakingLedger::<Test>::get(StakingAccount::Controller(2));
-			assert_eq!(ledger_result.unwrap().stash, 1); // correct ledger.
+			let ledger_result = StakingLedger::<Test>::get(StakingAccount::Controller(444));
+			assert_eq!(ledger_result.unwrap().stash, 333); // correct ledger.
 
-			// fetching ledger 1 by its stash works.
-			let ledger_result = StakingLedger::<Test>::get(StakingAccount::Stash(1));
-			assert_eq!(ledger_result.unwrap().stash, 1);
+			// fetching ledger 333 by its stash works.
+			let ledger_result = StakingLedger::<Test>::get(StakingAccount::Stash(333));
+			assert_eq!(ledger_result.unwrap().stash, 333);
 
 			// Case 2: corrupted ledger bonding.
 			// in this case, we simulate what happens when fetching a ledger by stash returns a
 			// ledger with a different stash. when this happens, we return an error instead of the
 			// ledger to prevent ledger mutations.
-			let mut ledger = Ledger::<Test>::get(2).unwrap();
-			assert_eq!(ledger.stash, 1);
-			ledger.stash = 2;
-			Ledger::<Test>::insert(2, ledger);
+			let mut ledger = Ledger::<Test>::get(444).unwrap();
+			assert_eq!(ledger.stash, 333);
+			ledger.stash = 444;
+			Ledger::<Test>::insert(444, ledger);
 
 			// now, we are prevented from fetching the ledger by stash from 1. Its associated
 			// controller (2) is now bonding a ledger with a different stash (2, not 1).
-			assert!(StakingLedger::<Test>::get(StakingAccount::Stash(1)).is_err());
+			assert!(StakingLedger::<Test>::get(StakingAccount::Stash(333)).is_err());
 		})
 	}
 
@@ -7069,7 +7072,7 @@
 	#[test]
 	fn deprecate_controller_batch_works_full_weight() {
-		ExtBuilder::default().build_and_execute(|| {
+		ExtBuilder::default().try_state(false).build_and_execute(|| {
 			// Given:
 
 			let start = 1001;
@@ -7253,7 +7256,7 @@
 			let bounded_controllers: BoundedVec<
 				_,
 				<Test as Config>::MaxControllersInDeprecationBatch,
-			> = BoundedVec::try_from(vec![1, 2, 3, 4]).unwrap();
+			> = BoundedVec::try_from(vec![333, 444, 555, 777]).unwrap();
 
 			assert_ok!(Staking::deprecate_controller_batch(
 				RuntimeOrigin::root(),
@@ -7276,7 +7279,7 @@
 			let bounded_controllers: BoundedVec<
 				_,
 				<Test as Config>::MaxControllersInDeprecationBatch,
-			> = BoundedVec::try_from(vec![4, 3, 2, 1]).unwrap();
+			> = BoundedVec::try_from(vec![777, 555, 444, 333]).unwrap();
 
 			assert_ok!(Staking::deprecate_controller_batch(
 				RuntimeOrigin::root(),
@@ -7296,9 +7299,9 @@
 			setup_double_bonded_ledgers();
 
 			// in this case, setting controller works due to the ordering of the calls.
-			assert_ok!(Staking::set_controller(RuntimeOrigin::signed(1)));
-			assert_ok!(Staking::set_controller(RuntimeOrigin::signed(2)));
-			assert_ok!(Staking::set_controller(RuntimeOrigin::signed(3)));
+			assert_ok!(Staking::set_controller(RuntimeOrigin::signed(333)));
+			assert_ok!(Staking::set_controller(RuntimeOrigin::signed(444)));
+			assert_ok!(Staking::set_controller(RuntimeOrigin::signed(555)));
 		})
 	}
 
@@ -7307,17 +7310,400 @@
 		ExtBuilder::default().has_stakers(false).try_state(false).build_and_execute(|| {
 			setup_double_bonded_ledgers();
 
-			// setting the controller of ledger associated with stash 3 fails since its stash is a
+			// setting the controller of ledger associated with stash 555 fails since its stash is a
 			// controller of another ledger.
 			assert_noop!(
-				Staking::set_controller(RuntimeOrigin::signed(3)),
+				Staking::set_controller(RuntimeOrigin::signed(555)),
 				Error::<Test>::BadState
 			);
 			assert_noop!(
-				Staking::set_controller(RuntimeOrigin::signed(2)),
+				Staking::set_controller(RuntimeOrigin::signed(444)),
 				Error::<Test>::BadState
 			);
-			assert_ok!(Staking::set_controller(RuntimeOrigin::signed(1)));
+			assert_ok!(Staking::set_controller(RuntimeOrigin::signed(333)));
+		})
+	}
+}
+
+mod ledger_recovery {
+	use super::*;
+	use frame_support::traits::InspectLockableCurrency;
+
+	#[test]
+	fn inspect_recovery_ledger_simple_works() {
+		ExtBuilder::default().has_stakers(true).try_state(false).build_and_execute(|| {
+			setup_double_bonded_ledgers();
+
+			// non-corrupted ledger.
+			assert_eq!(Staking::inspect_bond_state(&11).unwrap(), LedgerIntegrityState::Ok);
+
+			// non-bonded stash.
+			assert!(Bonded::<Test>::get(&1111).is_none());
+			assert!(Staking::inspect_bond_state(&1111).is_err());
+
+			// double bonded but not corrupted.
+			assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok);
+		})
+	}
+
+	#[test]
+	fn inspect_recovery_ledger_corrupted_killed_works() {
+		ExtBuilder::default().has_stakers(true).try_state(false).build_and_execute(|| {
+			setup_double_bonded_ledgers();
+
+			let lock_333_before = Balances::balance_locked(crate::STAKING_ID, &333);
+
+			// get into corrupted and killed ledger state by killing a corrupted ledger:
+			// init state:
+			//  (333, 444)
+			//  (444, 555)
+			// set_controller(444) to 444
+			//  (333, 444) -> corrupted
+			//  (444, 444)
+			// kill(333)
+			//  (444, 444) -> corrupted and None.
+			assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok);
+			set_controller_no_checks(&444);
+
+			// now try-state fails.
+			assert!(Staking::do_try_state(System::block_number()).is_err());
+
+			// 333 is corrupted since its controller is linking the 444 ledger.
+			assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Corrupted);
+			// 444 however is OK.
+			assert_eq!(Staking::inspect_bond_state(&444).unwrap(), LedgerIntegrityState::Ok);
+
+			// kill the corrupted ledger that is associated with stash 333.
+			assert_ok!(StakingLedger::<Test>::kill(&333));
+
+			// the 333 bond is no more, but it returns `BadState` because the lock on this stash
+			// is still set (see checks below).
+			assert_eq!(Staking::inspect_bond_state(&333), Err(Error::<Test>::BadState));
+			// now the *other* ledger associated with 444 has been corrupted and killed (None).
+			assert_eq!(
+				Staking::inspect_bond_state(&444),
+				Ok(LedgerIntegrityState::CorruptedKilled)
+			);
+
+			// side effects on 333 - ledger, bonded, payee, lock should be completely empty.
+			// however, the 333 lock remains.
+			assert_eq!(Balances::balance_locked(crate::STAKING_ID, &333), lock_333_before); // NOK
+			assert!(Bonded::<Test>::get(&333).is_none()); // OK
+			assert!(Payee::<Test>::get(&333).is_none()); // OK
+			assert!(Ledger::<Test>::get(&444).is_none()); // OK
+
+			// side effects on 444 - ledger, bonded, payee, lock should remain intact.
+			// however, the 444 lock was removed.
+			assert_eq!(Balances::balance_locked(crate::STAKING_ID, &444), 0); // NOK
+			assert!(Bonded::<Test>::get(&444).is_some()); // OK
+			assert!(Payee::<Test>::get(&444).is_some()); // OK
+			assert!(Ledger::<Test>::get(&555).is_none()); // NOK
+
+			assert!(Staking::do_try_state(System::block_number()).is_err());
+		})
+	}
+
+	#[test]
+	fn inspect_recovery_ledger_corrupted_killed_other_works() {
+		ExtBuilder::default().has_stakers(true).try_state(false).build_and_execute(|| {
+			setup_double_bonded_ledgers();
+
+			let lock_333_before = Balances::balance_locked(crate::STAKING_ID, &333);
+
+			// get into corrupted and killed ledger state by killing a corrupted ledger:
+			// init state:
+			//  (333, 444)
+			//  (444, 555)
+			// set_controller(444) to 444
+			//  (333, 444) -> corrupted
+			//  (444, 444)
+			// kill(444)
+			//  (333, 444) -> corrupted and None
+			assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok);
+			set_controller_no_checks(&444);
+
+			// now try-state fails.
+			assert!(Staking::do_try_state(System::block_number()).is_err());
+
+			// 333 is corrupted since its controller is linking the 444 ledger.
+			assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Corrupted);
+			// 444 however is OK.
+			assert_eq!(Staking::inspect_bond_state(&444).unwrap(), LedgerIntegrityState::Ok);
+
+			// kill the *other* ledger that is double bonded but not corrupted.
+			assert_ok!(StakingLedger::<Test>::kill(&444));
+
+			// now 333 is corrupted and None through the *other* ledger being killed.
+			assert_eq!(
+				Staking::inspect_bond_state(&333).unwrap(),
+				LedgerIntegrityState::CorruptedKilled,
+			);
+			// 444 is cleaned and not a stash anymore; no lock left behind.
+			assert_eq!(Ledger::<Test>::get(&444), None);
+			assert_eq!(Staking::inspect_bond_state(&444), Err(Error::<Test>::NotStash));
+
+			// side effects on 333 - ledger, bonded, payee, lock should be intact.
+			assert_eq!(Balances::balance_locked(crate::STAKING_ID, &333), lock_333_before); // OK
+			assert_eq!(Bonded::<Test>::get(&333), Some(444)); // OK
+			assert!(Payee::<Test>::get(&333).is_some()); // OK
+			// however, the ledger associated with its controller was killed.
+			assert!(Ledger::<Test>::get(&444).is_none()); // NOK
+
+			// side effects on 444 - ledger, bonded, payee, lock should be completely removed.
+			assert_eq!(Balances::balance_locked(crate::STAKING_ID, &444), 0); // OK
+			assert!(Bonded::<Test>::get(&444).is_none()); // OK
+			assert!(Payee::<Test>::get(&444).is_none()); // OK
+			assert!(Ledger::<Test>::get(&555).is_none()); // OK
+
+			assert!(Staking::do_try_state(System::block_number()).is_err());
+		})
+	}
+
+	#[test]
+	fn inspect_recovery_ledger_lock_corrupted_works() {
+		ExtBuilder::default().has_stakers(true).try_state(false).build_and_execute(|| {
+			setup_double_bonded_ledgers();
+
+			// get into the lock-corrupted ledger state by calling bond_extra on a ledger that is
+			// double bonded with a corrupted ledger.
+			// init state:
+			//  (333, 444)
+			//  (444, 555)
+			// set_controller(444) to 444
+			//  (333, 444) -> corrupted
+			//  (444, 444)
+			// bond_extra(333, 10) -> lock corrupted on 444
+			assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok);
+			set_controller_no_checks(&444);
+			bond_extra_no_checks(&333, 10);
+
+			// now try-state fails.
+			assert!(Staking::do_try_state(System::block_number()).is_err());
+
+			// 333 is corrupted since its controller is linking the 444 ledger.
+			assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Corrupted);
+			// the 444 ledger is not corrupted but its locks got out of sync.
+			assert_eq!(
+				Staking::inspect_bond_state(&444).unwrap(),
+				LedgerIntegrityState::LockCorrupted
+			);
+		})
+	}
+
+	// Corrupted ledger restore.
+	//
+	// * Double bonded and corrupted ledger.
+	#[test]
+	fn restore_ledger_corrupted_works() {
+		ExtBuilder::default().has_stakers(true).build_and_execute(|| {
+			setup_double_bonded_ledgers();
+
+			// get into the corrupted ledger state.
+			// init state:
+			//  (333, 444)
+			//  (444, 555)
+			// set_controller(444) to 444
+			//  (333, 444) -> corrupted
+			//  (444, 444)
+			assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok);
+			set_controller_no_checks(&444);
+
+			assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Corrupted);
+
+			// now try-state fails.
+			assert!(Staking::do_try_state(System::block_number()).is_err());
+
+			// recover the ledger bonded by the 333 stash.
+			assert_ok!(Staking::restore_ledger(RuntimeOrigin::root(), 333, None, None, None));
+
+			// try-state checks are ok now.
+			assert_ok!(Staking::do_try_state(System::block_number()));
+		})
+	}
+
+	// Corrupted and killed ledger restore.
+	//
+	// * Double bonded and corrupted ledger.
+	// * Ledger killed by its own controller.
+	#[test]
+	fn restore_ledger_corrupted_killed_works() {
+		ExtBuilder::default().has_stakers(true).build_and_execute(|| {
+			setup_double_bonded_ledgers();
+
+			// ledger.total == lock
+			let total_444_before_corruption = Balances::balance_locked(crate::STAKING_ID, &444);
+
+			// get into corrupted and killed ledger state by killing a corrupted ledger:
+			// init state:
+			//  (333, 444)
+			//  (444, 555)
+			// set_controller(444) to 444
+			//  (333, 444) -> corrupted
+			//  (444, 444)
+			// kill(333)
+			//  (444, 444) -> corrupted and None.
+			assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok);
+			set_controller_no_checks(&444);
+
+			// kill the corrupted ledger that is associated with stash 333.
+			assert_ok!(StakingLedger::<Test>::kill(&333));
+
+			// the 333 bond is no more, but it returns `BadState` because the lock on this stash
+			// is still set (see checks below).
+			assert_eq!(Staking::inspect_bond_state(&333), Err(Error::<Test>::BadState));
+			// now the *other* ledger associated with 444 has been corrupted and killed (None).
+			assert!(Staking::ledger(StakingAccount::Stash(444)).is_err());
+
+			// try-state should fail.
+			assert!(Staking::do_try_state(System::block_number()).is_err());
+
+			// recover the ledger bonded by the 333 stash.
+			assert_ok!(Staking::restore_ledger(RuntimeOrigin::root(), 333, None, None, None));
+
+			// for the try-state checks to pass, we also need to recover the stash 444, which is
+			// corrupted too by proxy of kill(333). Currently, both the lock and the ledger of 444
+			// have been cleared, so we need to provide the new amount to restore the ledger.
+			assert_noop!(
+				Staking::restore_ledger(RuntimeOrigin::root(), 444, None, None, None),
+				Error::<Test>::CannotRestoreLedger
+			);
+
+			assert_ok!(Staking::restore_ledger(
+				RuntimeOrigin::root(),
+				444,
+				None,
+				Some(total_444_before_corruption),
+				None,
+			));
+
+			// try-state checks are ok now.
+			assert_ok!(Staking::do_try_state(System::block_number()));
+		})
+	}
+
+	// Corrupted and killed by *other* ledger restore.
+	//
+	// * Double bonded and corrupted ledger.
+	// * The *other* double-bonded ledger killed via its stash (444).
+	#[test]
+	fn restore_ledger_corrupted_killed_other_works() {
+		ExtBuilder::default().has_stakers(true).build_and_execute(|| {
+			setup_double_bonded_ledgers();
+
+			// get into corrupted and killed ledger state by killing the *other* (non-corrupted)
+			// ledger:
+			// init state:
+			// (333, 444)
+			// (444, 555)
+			// set_controller(444) to 444
+			// (333, 444) -> corrupted
+			// (444, 444)
+			// kill(444)
+			// (333, 444) -> corrupted and None
+			assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok);
+			set_controller_no_checks(&444);
+
+			// now try-state fails.
+			assert!(Staking::do_try_state(System::block_number()).is_err());
+
+			// 333 is corrupted since its controller is linking 444's ledger.
+			assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Corrupted);
+			// 444 however is OK.
+			assert_eq!(Staking::inspect_bond_state(&444).unwrap(), LedgerIntegrityState::Ok);
+
+			// kill the *other* ledger that is double bonded but not corrupted.
+			assert_ok!(StakingLedger::<Test>::kill(&444));
+
+			// recover the ledger bonded by the 333 stash.
+			assert_ok!(Staking::restore_ledger(RuntimeOrigin::root(), 333, None, None, None));
+
+			// 444 does not need recovery in this case since it has been killed successfully.
+			assert_eq!(Staking::inspect_bond_state(&444), Err(Error::<Test>::NotStash));
+
+			// try-state checks are ok now.
+			assert_ok!(Staking::do_try_state(System::block_number()));
+		})
+	}
+
+	// Corrupted with bond_extra.
+	//
+	// * Double bonded and corrupted ledger.
+	// * Corrupted ledger calls `bond_extra`.
+	#[test]
+	fn restore_ledger_corrupted_bond_extra_works() {
+		ExtBuilder::default().has_stakers(true).build_and_execute(|| {
+			setup_double_bonded_ledgers();
+
+			let lock_333_before = Balances::balance_locked(crate::STAKING_ID, &333);
+			let lock_444_before = Balances::balance_locked(crate::STAKING_ID, &444);
+
+			// get into a corrupted ledger state with out-of-sync locks:
+			// init state:
+			// (333, 444)
+			// (444, 555)
+			// set_controller(444) to 444
+			// (333, 444) -> corrupted
+			// (444, 444)
+			// bond_extra(444, 40) -> OK
+			// bond_extra(333, 30) -> locks out of sync
+
+			assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok);
+			set_controller_no_checks(&444);
+
+			// now try-state fails.
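+			// (the checks below rely on the corrupted link: stash 333 now resolves to the
+			// ledger stored under controller 444, so a `bond_extra` for 333 grows that
+			// ledger's total while the resulting lock is placed on account 333.)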
+			assert!(Staking::do_try_state(System::block_number()).is_err());
+
+			// if 444 bonds extra, the locks remain in sync.
+			bond_extra_no_checks(&444, 40);
+			assert_eq!(Balances::balance_locked(crate::STAKING_ID, &333), lock_333_before);
+			assert_eq!(Balances::balance_locked(crate::STAKING_ID, &444), lock_444_before + 40);
+
+			// however, if 333 bonds extra, the wrong lock is updated.
+			bond_extra_no_checks(&333, 30);
+			assert_eq!(
+				Balances::balance_locked(crate::STAKING_ID, &333),
+				lock_444_before + 40 + 30
+			); // not OK
+			assert_eq!(Balances::balance_locked(crate::STAKING_ID, &444), lock_444_before + 40); // OK
+
+			// recover the ledger bonded by the 333 stash. Note that the total/lock needs to be
+			// re-written since the on-chain lock has gone out of sync.
+			assert_ok!(Staking::restore_ledger(
+				RuntimeOrigin::root(),
+				333,
+				None,
+				Some(lock_333_before + 30),
+				None
+			));
+
+			// now recover 444: although it is not corrupted, its lock and ledger.total are out
+			// of sync, so we need to explicitly set the ledger's lock and amount, otherwise the
+			// ledger recovery will fail.
+			assert_noop!(
+				Staking::restore_ledger(RuntimeOrigin::root(), 444, None, None, None),
+				Error::<Test>::CannotRestoreLedger
+			);
+
+			// and enforcing a new ledger lock/total on this non-corrupted ledger will work.
+			assert_ok!(Staking::restore_ledger(
+				RuntimeOrigin::root(),
+				444,
+				None,
+				Some(lock_444_before + 40),
+				None
+			));
+
+			// double-check that the ledgers reached the expected state and that the bond_extra
+			// done during the corrupted state is part of the recovered ledgers.
+			let ledger_333 = Bonded::<Test>::get(&333).and_then(Ledger::<Test>::get).unwrap();
+			let ledger_444 = Bonded::<Test>::get(&444).and_then(Ledger::<Test>::get).unwrap();
+
+			assert_eq!(ledger_333.total, lock_333_before + 30);
+			assert_eq!(Balances::balance_locked(crate::STAKING_ID, &333), ledger_333.total);
+			assert_eq!(ledger_444.total, lock_444_before + 40);
+			assert_eq!(Balances::balance_locked(crate::STAKING_ID, &444), ledger_444.total);
+
+			// try-state checks are ok now.
+			assert_ok!(Staking::do_try_state(System::block_number()));
 		})
 	}
 }
diff --git a/substrate/frame/staking/src/weights.rs b/substrate/frame/staking/src/weights.rs
index 6f729e08ba5..8a04a3dfb3f 100644
--- a/substrate/frame/staking/src/weights.rs
+++ b/substrate/frame/staking/src/weights.rs
@@ -17,10 +17,10 @@
 //! Autogenerated weights for `pallet_staking`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2024-01-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-03-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-q7z7ruxr-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024`
 
 // Executed Command:
@@ -80,6 +80,7 @@ pub trait WeightInfo {
 	fn chill_other() -> Weight;
 	fn force_apply_min_commission() -> Weight;
 	fn set_min_commission() -> Weight;
+	fn restore_ledger() -> Weight;
 }
 
 /// Weights for `pallet_staking` using the Substrate node and recommended hardware.
@@ -87,21 +88,21 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:0 w:1) - /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::Payee` (r:0 w:1) /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn bond() -> Weight { // Proof Size summary in bytes: - // Measured: `927` + // Measured: `1042` // Estimated: `4764` - // Minimum execution time: 42_042_000 picoseconds. - Weight::from_parts(43_292_000, 4764) - .saturating_add(T::DbWeight::get().reads(3_u64)) + // Minimum execution time: 48_753_000 picoseconds. + Weight::from_parts(50_539_000, 4764) + .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } /// Storage: `Staking::Bonded` (r:1 w:0) @@ -120,21 +121,21 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1990` // Estimated: `8877` - // Minimum execution time: 85_050_000 picoseconds. - Weight::from_parts(87_567_000, 8877) + // Minimum execution time: 92_701_000 picoseconds. + Weight::from_parts(95_657_000, 8877) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:0) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// Storage: `Staking::MinNominatorBond` (r:1 w:0) /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -147,41 +148,43 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2195` // Estimated: `8877` - // Minimum execution time: 89_076_000 picoseconds. - Weight::from_parts(92_715_000, 8877) + // Minimum execution time: 101_049_000 picoseconds. 
+ Weight::from_parts(103_729_000, 8877) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Staking::CurrentEra` (r:1 w:0) - /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:0) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1115` + // Measured: `1297` // Estimated: `4764` - // Minimum execution time: 42_067_000 picoseconds. - Weight::from_parts(43_239_807, 4764) - // Standard Error: 831 - .saturating_add(Weight::from_parts(46_257, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(5_u64)) + // Minimum execution time: 51_672_000 picoseconds. + Weight::from_parts(53_817_441, 4764) + // Standard Error: 1_124 + .saturating_add(Weight::from_parts(49_168, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::SlashingSpans` (r:1 w:1) /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Staking::Bonded` (r:1 w:1) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -207,10 +210,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 86_490_000 picoseconds. - Weight::from_parts(95_358_751, 6248) - // Standard Error: 3_952 - .saturating_add(Weight::from_parts(1_294_907, 0).saturating_mul(s.into())) + // Minimum execution time: 92_846_000 picoseconds. 
+ Weight::from_parts(102_158_606, 6248) + // Standard Error: 4_187 + .saturating_add(Weight::from_parts(1_436_364, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13_u64)) .saturating_add(T::DbWeight::get().writes(11_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -218,6 +221,8 @@ impl WeightInfo for SubstrateWeight { } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::MinValidatorBond` (r:1 w:0) /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::MinCommission` (r:1 w:0) @@ -228,8 +233,6 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:0) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListNodes` (r:1 w:1) /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:1 w:1) @@ -242,31 +245,35 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1372` // Estimated: `4556` - // Minimum execution time: 50_326_000 picoseconds. - Weight::from_parts(52_253_000, 4556) + // Minimum execution time: 58_162_000 picoseconds. + Weight::from_parts(60_124_000, 4556) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:128 w:128) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// The range of component `k` is `[1, 128]`. fn kick(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1280 + k * (569 ±0)` + // Measured: `1815 + k * (572 ±0)` // Estimated: `4556 + k * (3033 ±0)` - // Minimum execution time: 29_305_000 picoseconds. - Weight::from_parts(32_199_604, 4556) - // Standard Error: 7_150 - .saturating_add(Weight::from_parts(6_437_124, 0).saturating_mul(k.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) + // Minimum execution time: 37_950_000 picoseconds. 
+ Weight::from_parts(34_461_075, 4556) + // Standard Error: 8_013 + .saturating_add(Weight::from_parts(6_696_510, 0).saturating_mul(k.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) .saturating_add(Weight::from_parts(0, 3033).saturating_mul(k.into())) } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::MinNominatorBond` (r:1 w:0) /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:1) @@ -277,8 +284,6 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListNodes` (r:2 w:2) /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:1 w:1) @@ -292,10 +297,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1866 + n * (102 ±0)` // Estimated: `6248 + n * (2520 ±0)` - // Minimum execution time: 63_267_000 picoseconds. - Weight::from_parts(61_741_404, 6248) - // Standard Error: 12_955 - .saturating_add(Weight::from_parts(3_811_743, 0).saturating_mul(n.into())) + // Minimum execution time: 70_167_000 picoseconds. + Weight::from_parts(68_024_084, 6248) + // Standard Error: 14_256 + .saturating_add(Weight::from_parts(4_195_757, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) @@ -303,6 +308,8 @@ impl WeightInfo for SubstrateWeight { } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Validators` (r:1 w:0) /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:1) @@ -317,11 +324,11 @@ impl WeightInfo for SubstrateWeight { /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill() -> Weight { // Proof Size summary in bytes: - // Measured: `1650` + // Measured: `1816` // Estimated: `6248` - // Minimum execution time: 52_862_000 picoseconds. - Weight::from_parts(54_108_000, 6248) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Minimum execution time: 61_730_000 picoseconds. 
+ Weight::from_parts(63_430_000, 6248) + .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: `Staking::Ledger` (r:1 w:0) @@ -334,37 +341,37 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `902` // Estimated: `4556` - // Minimum execution time: 16_350_000 picoseconds. - Weight::from_parts(16_802_000, 4556) + // Minimum execution time: 20_857_000 picoseconds. + Weight::from_parts(21_615_000, 4556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Staking::Payee` (r:1 w:1) - /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:1 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn update_payee() -> Weight { // Proof Size summary in bytes: // Measured: `969` // Estimated: `4556` - // Minimum execution time: 19_981_000 picoseconds. - Weight::from_parts(20_539_000, 4556) + // Minimum execution time: 24_739_000 picoseconds. + Weight::from_parts(25_785_000, 4556) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:1 w:2) + /// Storage: `Staking::Ledger` (r:2 w:2) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) fn set_controller() -> Weight { // Proof Size summary in bytes: // Measured: `902` - // Estimated: `4556` - // Minimum execution time: 19_304_000 picoseconds. - Weight::from_parts(20_000_000, 4556) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Estimated: `8122` + // Minimum execution time: 24_622_000 picoseconds. + Weight::from_parts(25_220_000, 8122) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Staking::ValidatorCount` (r:0 w:1) @@ -373,8 +380,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_568_000 picoseconds. - Weight::from_parts(2_708_000, 0) + // Minimum execution time: 2_634_000 picoseconds. + Weight::from_parts(2_842_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -383,8 +390,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_950_000 picoseconds. - Weight::from_parts(8_348_000, 0) + // Minimum execution time: 8_496_000 picoseconds. + Weight::from_parts(9_016_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -393,8 +400,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_967_000 picoseconds. - Weight::from_parts(8_222_000, 0) + // Minimum execution time: 8_510_000 picoseconds. 
+ Weight::from_parts(8_893_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -403,8 +410,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_006_000 picoseconds. - Weight::from_parts(8_440_000, 0) + // Minimum execution time: 8_243_000 picoseconds. + Weight::from_parts(8_678_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::Invulnerables` (r:0 w:1) @@ -414,30 +421,30 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_524_000 picoseconds. - Weight::from_parts(3_123_608, 0) - // Standard Error: 59 - .saturating_add(Weight::from_parts(11_596, 0).saturating_mul(v.into())) + // Minimum execution time: 2_781_000 picoseconds. + Weight::from_parts(3_441_708, 0) + // Standard Error: 58 + .saturating_add(Weight::from_parts(11_811, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: `Staking::Ledger` (r:5900 w:11800) + /// Storage: `Staking::Ledger` (r:11800 w:11800) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:5900 w:5900) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Payee` (r:5900 w:0) /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:0 w:5900) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// The range of component `i` is `[0, 5900]`. fn deprecate_controller_batch(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1356 + i * (151 ±0)` - // Estimated: `990 + i * (3566 ±0)` - // Minimum execution time: 2_092_000 picoseconds. - Weight::from_parts(2_258_000, 990) - // Standard Error: 32_695 - .saturating_add(Weight::from_parts(16_669_219, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(i.into()))) + // Measured: `1746 + i * (229 ±0)` + // Estimated: `990 + i * (7132 ±0)` + // Minimum execution time: 5_331_000 picoseconds. + Weight::from_parts(5_511_000, 990) + // Standard Error: 66_734 + .saturating_add(Weight::from_parts(31_157_413, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(i.into()))) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(i.into()))) - .saturating_add(Weight::from_parts(0, 3566).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(0, 7132).saturating_mul(i.into())) } /// Storage: `Staking::SlashingSpans` (r:1 w:1) /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -472,10 +479,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 84_275_000 picoseconds. - Weight::from_parts(92_512_416, 6248) - // Standard Error: 3_633 - .saturating_add(Weight::from_parts(1_315_923, 0).saturating_mul(s.into())) + // Minimum execution time: 89_473_000 picoseconds. 
+ Weight::from_parts(98_055_990, 6248) + // Standard Error: 4_159 + .saturating_add(Weight::from_parts(1_398_203, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13_u64)) .saturating_add(T::DbWeight::get().writes(12_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -488,10 +495,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `66672` // Estimated: `70137` - // Minimum execution time: 101_707_000 picoseconds. - Weight::from_parts(912_819_462, 70137) - // Standard Error: 57_547 - .saturating_add(Weight::from_parts(4_856_799, 0).saturating_mul(s.into())) + // Minimum execution time: 102_480_000 picoseconds. + Weight::from_parts(1_165_789_820, 70137) + // Standard Error: 77_157 + .saturating_add(Weight::from_parts(6_489_253, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -528,10 +535,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `33297 + n * (377 ±0)` // Estimated: `30944 + n * (3774 ±0)` - // Minimum execution time: 138_657_000 picoseconds. - Weight::from_parts(167_173_445, 30944) - // Standard Error: 25_130 - .saturating_add(Weight::from_parts(44_566_012, 0).saturating_mul(n.into())) + // Minimum execution time: 156_890_000 picoseconds. + Weight::from_parts(202_972_688, 30944) + // Standard Error: 29_972 + .saturating_add(Weight::from_parts(48_226_698, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(14_u64)) .saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) @@ -555,10 +562,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1991 + l * (7 ±0)` // Estimated: `8877` - // Minimum execution time: 80_061_000 picoseconds. - Weight::from_parts(82_836_434, 8877) - // Standard Error: 4_348 - .saturating_add(Weight::from_parts(75_744, 0).saturating_mul(l.into())) + // Minimum execution time: 88_482_000 picoseconds. + Weight::from_parts(92_616_600, 8877) + // Standard Error: 4_411 + .saturating_add(Weight::from_parts(117_722, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -593,10 +600,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 92_560_000 picoseconds. - Weight::from_parts(97_684_741, 6248) - // Standard Error: 3_361 - .saturating_add(Weight::from_parts(1_292_732, 0).saturating_mul(s.into())) + // Minimum execution time: 98_489_000 picoseconds. + Weight::from_parts(102_968_643, 6248) + // Standard Error: 4_823 + .saturating_add(Weight::from_parts(1_420_838, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(11_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -642,12 +649,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0 + n * (720 ±0) + v * (3598 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 564_963_000 picoseconds. 
- Weight::from_parts(569_206_000, 512390) - // Standard Error: 2_033_235 - .saturating_add(Weight::from_parts(68_025_841, 0).saturating_mul(v.into())) - // Standard Error: 202_600 - .saturating_add(Weight::from_parts(17_916_770, 0).saturating_mul(n.into())) + // Minimum execution time: 604_820_000 picoseconds. + Weight::from_parts(608_838_000, 512390) + // Standard Error: 2_300_345 + .saturating_add(Weight::from_parts(72_980_573, 0).saturating_mul(v.into())) + // Standard Error: 229_216 + .saturating_add(Weight::from_parts(20_739_416, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(206_u64)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -678,12 +685,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3175 + n * (911 ±0) + v * (395 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 32_196_540_000 picoseconds. - Weight::from_parts(32_341_871_000, 512390) - // Standard Error: 354_657 - .saturating_add(Weight::from_parts(5_143_440, 0).saturating_mul(v.into())) - // Standard Error: 354_657 - .saturating_add(Weight::from_parts(3_328_189, 0).saturating_mul(n.into())) + // Minimum execution time: 37_380_439_000 picoseconds. + Weight::from_parts(38_187_734_000, 512390) + // Standard Error: 425_319 + .saturating_add(Weight::from_parts(6_001_288, 0).saturating_mul(v.into())) + // Standard Error: 425_319 + .saturating_add(Weight::from_parts(4_129_446, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(201_u64)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -700,10 +707,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `979 + v * (50 ±0)` // Estimated: `3510 + v * (2520 ±0)` - // Minimum execution time: 2_381_903_000 picoseconds. - Weight::from_parts(32_693_059, 3510) - // Standard Error: 10_000 - .saturating_add(Weight::from_parts(4_736_173, 0).saturating_mul(v.into())) + // Minimum execution time: 2_572_838_000 picoseconds. + Weight::from_parts(67_632_557, 3510) + // Standard Error: 12_028 + .saturating_add(Weight::from_parts(5_117_459, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into())) @@ -714,6 +721,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxStakedRewards` (r:0 w:1) + /// Proof: `Staking::MaxStakedRewards` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::ChillThreshold` (r:0 w:1) /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) @@ -724,9 +733,9 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_434_000 picoseconds. 
- Weight::from_parts(5_742_000, 0) - .saturating_add(T::DbWeight::get().writes(6_u64)) + // Minimum execution time: 5_962_000 picoseconds. + Weight::from_parts(6_497_000, 0) + .saturating_add(T::DbWeight::get().writes(7_u64)) } /// Storage: `Staking::MinCommission` (r:0 w:1) /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -734,6 +743,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxStakedRewards` (r:0 w:1) + /// Proof: `Staking::MaxStakedRewards` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::ChillThreshold` (r:0 w:1) /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) @@ -744,9 +755,9 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_588_000 picoseconds. - Weight::from_parts(4_854_000, 0) - .saturating_add(T::DbWeight::get().writes(6_u64)) + // Minimum execution time: 5_227_000 picoseconds. + Weight::from_parts(5_496_000, 0) + .saturating_add(T::DbWeight::get().writes(7_u64)) } /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) @@ -774,8 +785,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1939` // Estimated: `6248` - // Minimum execution time: 68_780_000 picoseconds. - Weight::from_parts(71_479_000, 6248) + // Minimum execution time: 75_129_000 picoseconds. + Weight::from_parts(77_498_000, 6248) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -787,8 +798,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `691` // Estimated: `3510` - // Minimum execution time: 12_268_000 picoseconds. - Weight::from_parts(12_661_000, 3510) + // Minimum execution time: 13_488_000 picoseconds. + Weight::from_parts(14_183_000, 3510) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -798,31 +809,50 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_071_000 picoseconds. - Weight::from_parts(3_334_000, 0) + // Minimum execution time: 3_368_000 picoseconds. 
+ Weight::from_parts(3_582_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + fn restore_ledger() -> Weight { + // Proof Size summary in bytes: + // Measured: `1047` + // Estimated: `4764` + // Minimum execution time: 44_876_000 picoseconds. + Weight::from_parts(46_353_000, 4764) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + } } // For backwards compatibility and tests. impl WeightInfo for () { /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:0 w:1) - /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::Payee` (r:0 w:1) /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn bond() -> Weight { // Proof Size summary in bytes: - // Measured: `927` + // Measured: `1042` // Estimated: `4764` - // Minimum execution time: 42_042_000 picoseconds. - Weight::from_parts(43_292_000, 4764) - .saturating_add(RocksDbWeight::get().reads(3_u64)) + // Minimum execution time: 48_753_000 picoseconds. + Weight::from_parts(50_539_000, 4764) + .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } /// Storage: `Staking::Bonded` (r:1 w:0) @@ -841,21 +871,21 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1990` // Estimated: `8877` - // Minimum execution time: 85_050_000 picoseconds. - Weight::from_parts(87_567_000, 8877) + // Minimum execution time: 92_701_000 picoseconds. 
+ Weight::from_parts(95_657_000, 8877) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:0) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// Storage: `Staking::MinNominatorBond` (r:1 w:0) /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -868,41 +898,43 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2195` // Estimated: `8877` - // Minimum execution time: 89_076_000 picoseconds. - Weight::from_parts(92_715_000, 8877) + // Minimum execution time: 101_049_000 picoseconds. + Weight::from_parts(103_729_000, 8877) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Staking::CurrentEra` (r:1 w:0) - /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:0) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1115` + // Measured: `1297` // Estimated: `4764` - // Minimum execution time: 42_067_000 picoseconds. - Weight::from_parts(43_239_807, 4764) - // Standard Error: 831 - .saturating_add(Weight::from_parts(46_257, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(5_u64)) + // Minimum execution time: 51_672_000 picoseconds. 
+ Weight::from_parts(53_817_441, 4764) + // Standard Error: 1_124 + .saturating_add(Weight::from_parts(49_168, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::SlashingSpans` (r:1 w:1) /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Staking::Bonded` (r:1 w:1) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -928,10 +960,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 86_490_000 picoseconds. - Weight::from_parts(95_358_751, 6248) - // Standard Error: 3_952 - .saturating_add(Weight::from_parts(1_294_907, 0).saturating_mul(s.into())) + // Minimum execution time: 92_846_000 picoseconds. + Weight::from_parts(102_158_606, 6248) + // Standard Error: 4_187 + .saturating_add(Weight::from_parts(1_436_364, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(13_u64)) .saturating_add(RocksDbWeight::get().writes(11_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -939,6 +971,8 @@ impl WeightInfo for () { } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::MinValidatorBond` (r:1 w:0) /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::MinCommission` (r:1 w:0) @@ -949,8 +983,6 @@ impl WeightInfo for () { /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:0) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListNodes` (r:1 w:1) /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:1 w:1) @@ -963,31 +995,35 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1372` // Estimated: `4556` - // Minimum execution time: 50_326_000 picoseconds. - Weight::from_parts(52_253_000, 4556) + // Minimum execution time: 58_162_000 picoseconds. 
+ Weight::from_parts(60_124_000, 4556) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:128 w:128) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// The range of component `k` is `[1, 128]`. fn kick(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1280 + k * (569 ±0)` + // Measured: `1815 + k * (572 ±0)` // Estimated: `4556 + k * (3033 ±0)` - // Minimum execution time: 29_305_000 picoseconds. - Weight::from_parts(32_199_604, 4556) - // Standard Error: 7_150 - .saturating_add(Weight::from_parts(6_437_124, 0).saturating_mul(k.into())) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + // Minimum execution time: 37_950_000 picoseconds. + Weight::from_parts(34_461_075, 4556) + // Standard Error: 8_013 + .saturating_add(Weight::from_parts(6_696_510, 0).saturating_mul(k.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) .saturating_add(Weight::from_parts(0, 3033).saturating_mul(k.into())) } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::MinNominatorBond` (r:1 w:0) /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:1) @@ -998,8 +1034,6 @@ impl WeightInfo for () { /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListNodes` (r:2 w:2) /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:1 w:1) @@ -1013,10 +1047,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1866 + n * (102 ±0)` // Estimated: `6248 + n * (2520 ±0)` - // Minimum execution time: 63_267_000 picoseconds. - Weight::from_parts(61_741_404, 6248) - // Standard Error: 12_955 - .saturating_add(Weight::from_parts(3_811_743, 0).saturating_mul(n.into())) + // Minimum execution time: 70_167_000 picoseconds. 
+ Weight::from_parts(68_024_084, 6248) + // Standard Error: 14_256 + .saturating_add(Weight::from_parts(4_195_757, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) @@ -1024,6 +1058,8 @@ impl WeightInfo for () { } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Validators` (r:1 w:0) /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:1) @@ -1038,11 +1074,11 @@ impl WeightInfo for () { /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill() -> Weight { // Proof Size summary in bytes: - // Measured: `1650` + // Measured: `1816` // Estimated: `6248` - // Minimum execution time: 52_862_000 picoseconds. - Weight::from_parts(54_108_000, 6248) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Minimum execution time: 61_730_000 picoseconds. + Weight::from_parts(63_430_000, 6248) + .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: `Staking::Ledger` (r:1 w:0) @@ -1055,37 +1091,37 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `902` // Estimated: `4556` - // Minimum execution time: 16_350_000 picoseconds. - Weight::from_parts(16_802_000, 4556) + // Minimum execution time: 20_857_000 picoseconds. + Weight::from_parts(21_615_000, 4556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Staking::Payee` (r:1 w:1) - /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:1 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn update_payee() -> Weight { // Proof Size summary in bytes: // Measured: `969` // Estimated: `4556` - // Minimum execution time: 19_981_000 picoseconds. - Weight::from_parts(20_539_000, 4556) + // Minimum execution time: 24_739_000 picoseconds. + Weight::from_parts(25_785_000, 4556) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:1 w:2) + /// Storage: `Staking::Ledger` (r:2 w:2) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) fn set_controller() -> Weight { // Proof Size summary in bytes: // Measured: `902` - // Estimated: `4556` - // Minimum execution time: 19_304_000 picoseconds. 
- Weight::from_parts(20_000_000, 4556) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Estimated: `8122` + // Minimum execution time: 24_622_000 picoseconds. + Weight::from_parts(25_220_000, 8122) + .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Staking::ValidatorCount` (r:0 w:1) @@ -1094,8 +1130,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_568_000 picoseconds. - Weight::from_parts(2_708_000, 0) + // Minimum execution time: 2_634_000 picoseconds. + Weight::from_parts(2_842_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -1104,8 +1140,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_950_000 picoseconds. - Weight::from_parts(8_348_000, 0) + // Minimum execution time: 8_496_000 picoseconds. + Weight::from_parts(9_016_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -1114,8 +1150,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_967_000 picoseconds. - Weight::from_parts(8_222_000, 0) + // Minimum execution time: 8_510_000 picoseconds. + Weight::from_parts(8_893_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -1124,8 +1160,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_006_000 picoseconds. - Weight::from_parts(8_440_000, 0) + // Minimum execution time: 8_243_000 picoseconds. + Weight::from_parts(8_678_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::Invulnerables` (r:0 w:1) @@ -1135,30 +1171,30 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_524_000 picoseconds. - Weight::from_parts(3_123_608, 0) - // Standard Error: 59 - .saturating_add(Weight::from_parts(11_596, 0).saturating_mul(v.into())) + // Minimum execution time: 2_781_000 picoseconds. + Weight::from_parts(3_441_708, 0) + // Standard Error: 58 + .saturating_add(Weight::from_parts(11_811, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: `Staking::Ledger` (r:5900 w:11800) + /// Storage: `Staking::Ledger` (r:11800 w:11800) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:5900 w:5900) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Payee` (r:5900 w:0) /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:0 w:5900) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// The range of component `i` is `[0, 5900]`. fn deprecate_controller_batch(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1356 + i * (151 ±0)` - // Estimated: `990 + i * (3566 ±0)` - // Minimum execution time: 2_092_000 picoseconds. 
- Weight::from_parts(2_258_000, 990) - // Standard Error: 32_695 - .saturating_add(Weight::from_parts(16_669_219, 0).saturating_mul(i.into())) - .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(i.into()))) + // Measured: `1746 + i * (229 ±0)` + // Estimated: `990 + i * (7132 ±0)` + // Minimum execution time: 5_331_000 picoseconds. + Weight::from_parts(5_511_000, 990) + // Standard Error: 66_734 + .saturating_add(Weight::from_parts(31_157_413, 0).saturating_mul(i.into())) + .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(i.into()))) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(i.into()))) - .saturating_add(Weight::from_parts(0, 3566).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(0, 7132).saturating_mul(i.into())) } /// Storage: `Staking::SlashingSpans` (r:1 w:1) /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -1193,10 +1229,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 84_275_000 picoseconds. - Weight::from_parts(92_512_416, 6248) - // Standard Error: 3_633 - .saturating_add(Weight::from_parts(1_315_923, 0).saturating_mul(s.into())) + // Minimum execution time: 89_473_000 picoseconds. + Weight::from_parts(98_055_990, 6248) + // Standard Error: 4_159 + .saturating_add(Weight::from_parts(1_398_203, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(13_u64)) .saturating_add(RocksDbWeight::get().writes(12_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -1209,10 +1245,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `66672` // Estimated: `70137` - // Minimum execution time: 101_707_000 picoseconds. - Weight::from_parts(912_819_462, 70137) - // Standard Error: 57_547 - .saturating_add(Weight::from_parts(4_856_799, 0).saturating_mul(s.into())) + // Minimum execution time: 102_480_000 picoseconds. + Weight::from_parts(1_165_789_820, 70137) + // Standard Error: 77_157 + .saturating_add(Weight::from_parts(6_489_253, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1249,10 +1285,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `33297 + n * (377 ±0)` // Estimated: `30944 + n * (3774 ±0)` - // Minimum execution time: 138_657_000 picoseconds. - Weight::from_parts(167_173_445, 30944) - // Standard Error: 25_130 - .saturating_add(Weight::from_parts(44_566_012, 0).saturating_mul(n.into())) + // Minimum execution time: 156_890_000 picoseconds. + Weight::from_parts(202_972_688, 30944) + // Standard Error: 29_972 + .saturating_add(Weight::from_parts(48_226_698, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(14_u64)) .saturating_add(RocksDbWeight::get().reads((6_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) @@ -1276,10 +1312,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1991 + l * (7 ±0)` // Estimated: `8877` - // Minimum execution time: 80_061_000 picoseconds. - Weight::from_parts(82_836_434, 8877) - // Standard Error: 4_348 - .saturating_add(Weight::from_parts(75_744, 0).saturating_mul(l.into())) + // Minimum execution time: 88_482_000 picoseconds. 
+ Weight::from_parts(92_616_600, 8877) + // Standard Error: 4_411 + .saturating_add(Weight::from_parts(117_722, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -1314,10 +1350,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 92_560_000 picoseconds. - Weight::from_parts(97_684_741, 6248) - // Standard Error: 3_361 - .saturating_add(Weight::from_parts(1_292_732, 0).saturating_mul(s.into())) + // Minimum execution time: 98_489_000 picoseconds. + Weight::from_parts(102_968_643, 6248) + // Standard Error: 4_823 + .saturating_add(Weight::from_parts(1_420_838, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(11_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -1363,12 +1399,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0 + n * (720 ±0) + v * (3598 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 564_963_000 picoseconds. - Weight::from_parts(569_206_000, 512390) - // Standard Error: 2_033_235 - .saturating_add(Weight::from_parts(68_025_841, 0).saturating_mul(v.into())) - // Standard Error: 202_600 - .saturating_add(Weight::from_parts(17_916_770, 0).saturating_mul(n.into())) + // Minimum execution time: 604_820_000 picoseconds. + Weight::from_parts(608_838_000, 512390) + // Standard Error: 2_300_345 + .saturating_add(Weight::from_parts(72_980_573, 0).saturating_mul(v.into())) + // Standard Error: 229_216 + .saturating_add(Weight::from_parts(20_739_416, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(206_u64)) .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -1399,12 +1435,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3175 + n * (911 ±0) + v * (395 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 32_196_540_000 picoseconds. - Weight::from_parts(32_341_871_000, 512390) - // Standard Error: 354_657 - .saturating_add(Weight::from_parts(5_143_440, 0).saturating_mul(v.into())) - // Standard Error: 354_657 - .saturating_add(Weight::from_parts(3_328_189, 0).saturating_mul(n.into())) + // Minimum execution time: 37_380_439_000 picoseconds. + Weight::from_parts(38_187_734_000, 512390) + // Standard Error: 425_319 + .saturating_add(Weight::from_parts(6_001_288, 0).saturating_mul(v.into())) + // Standard Error: 425_319 + .saturating_add(Weight::from_parts(4_129_446, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(201_u64)) .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -1421,10 +1457,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `979 + v * (50 ±0)` // Estimated: `3510 + v * (2520 ±0)` - // Minimum execution time: 2_381_903_000 picoseconds. - Weight::from_parts(32_693_059, 3510) - // Standard Error: 10_000 - .saturating_add(Weight::from_parts(4_736_173, 0).saturating_mul(v.into())) + // Minimum execution time: 2_572_838_000 picoseconds. 
+ Weight::from_parts(67_632_557, 3510) + // Standard Error: 12_028 + .saturating_add(Weight::from_parts(5_117_459, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into())) @@ -1435,6 +1471,8 @@ impl WeightInfo for () { /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxStakedRewards` (r:0 w:1) + /// Proof: `Staking::MaxStakedRewards` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::ChillThreshold` (r:0 w:1) /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) @@ -1445,9 +1483,9 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_434_000 picoseconds. - Weight::from_parts(5_742_000, 0) - .saturating_add(RocksDbWeight::get().writes(6_u64)) + // Minimum execution time: 5_962_000 picoseconds. + Weight::from_parts(6_497_000, 0) + .saturating_add(RocksDbWeight::get().writes(7_u64)) } /// Storage: `Staking::MinCommission` (r:0 w:1) /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -1455,6 +1493,8 @@ impl WeightInfo for () { /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxStakedRewards` (r:0 w:1) + /// Proof: `Staking::MaxStakedRewards` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::ChillThreshold` (r:0 w:1) /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) @@ -1465,9 +1505,9 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_588_000 picoseconds. - Weight::from_parts(4_854_000, 0) - .saturating_add(RocksDbWeight::get().writes(6_u64)) + // Minimum execution time: 5_227_000 picoseconds. + Weight::from_parts(5_496_000, 0) + .saturating_add(RocksDbWeight::get().writes(7_u64)) } /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) @@ -1495,8 +1535,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1939` // Estimated: `6248` - // Minimum execution time: 68_780_000 picoseconds. - Weight::from_parts(71_479_000, 6248) + // Minimum execution time: 75_129_000 picoseconds. + Weight::from_parts(77_498_000, 6248) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -1508,8 +1548,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `691` // Estimated: `3510` - // Minimum execution time: 12_268_000 picoseconds. 
- Weight::from_parts(12_661_000, 3510) + // Minimum execution time: 13_488_000 picoseconds. + Weight::from_parts(14_183_000, 3510) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1519,8 +1559,27 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_071_000 picoseconds. - Weight::from_parts(3_334_000, 0) + // Minimum execution time: 3_368_000 picoseconds. + Weight::from_parts(3_582_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + fn restore_ledger() -> Weight { + // Proof Size summary in bytes: + // Measured: `1047` + // Estimated: `4764` + // Minimum execution time: 44_876_000 picoseconds. + Weight::from_parts(46_353_000, 4764) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } } diff --git a/substrate/frame/support/src/traits.rs b/substrate/frame/support/src/traits.rs index 1997d8fc223..24e7e1c8a65 100644 --- a/substrate/frame/support/src/traits.rs +++ b/substrate/frame/support/src/traits.rs @@ -22,8 +22,8 @@ pub mod tokens; pub use tokens::{ currency::{ - ActiveIssuanceOf, Currency, LockIdentifier, LockableCurrency, NamedReservableCurrency, - ReservableCurrency, TotalIssuanceOf, VestingSchedule, + ActiveIssuanceOf, Currency, InspectLockableCurrency, LockIdentifier, LockableCurrency, + NamedReservableCurrency, ReservableCurrency, TotalIssuanceOf, VestingSchedule, }, fungible, fungibles, imbalance::{Imbalance, OnUnbalanced, SignedImbalance}, diff --git a/substrate/frame/support/src/traits/tokens/currency.rs b/substrate/frame/support/src/traits/tokens/currency.rs index 8b773115011..282e7f64473 100644 --- a/substrate/frame/support/src/traits/tokens/currency.rs +++ b/substrate/frame/support/src/traits/tokens/currency.rs @@ -27,7 +27,7 @@ use sp_runtime::{traits::MaybeSerializeDeserialize, DispatchError}; mod reservable; pub use reservable::{NamedReservableCurrency, ReservableCurrency}; mod lockable; -pub use lockable::{LockIdentifier, LockableCurrency, VestingSchedule}; +pub use lockable::{InspectLockableCurrency, LockIdentifier, LockableCurrency, VestingSchedule}; /// Abstraction over a fungible assets system. 
pub trait Currency { diff --git a/substrate/frame/support/src/traits/tokens/currency/lockable.rs b/substrate/frame/support/src/traits/tokens/currency/lockable.rs index 955814f5aa9..51a48dd15ce 100644 --- a/substrate/frame/support/src/traits/tokens/currency/lockable.rs +++ b/substrate/frame/support/src/traits/tokens/currency/lockable.rs @@ -64,6 +64,12 @@ pub trait LockableCurrency: Currency { fn remove_lock(id: LockIdentifier, who: &AccountId); } +/// An inspect interface for a currency whose accounts can have liquidity restrictions. +pub trait InspectLockableCurrency: LockableCurrency { + /// Amount of funds locked for `who` associated with `id`. + fn balance_locked(id: LockIdentifier, who: &AccountId) -> Self::Balance; +} + /// A vesting schedule over a currency. This allows a particular currency to have vesting limits /// applied to it. pub trait VestingSchedule { -- GitLab From 597ea9203ad57d48b766fa7fb1fcc1d388118cb3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 27 Mar 2024 23:02:37 +0000 Subject: [PATCH 047/128] pallet-scheduler: Unrequest call on failed lookup (#3849) When the scheduler fails to look up a `call`, it should unrequest it, because it will not be required anymore. --- prdoc/pr_3849.prdoc | 13 +++++++++++++ substrate/frame/scheduler/src/lib.rs | 11 +++++++++++ substrate/frame/scheduler/src/tests.rs | 4 ++++ 3 files changed, 28 insertions(+) create mode 100644 prdoc/pr_3849.prdoc diff --git a/prdoc/pr_3849.prdoc b/prdoc/pr_3849.prdoc new file mode 100644 index 00000000000..a1372b60ffc --- /dev/null +++ b/prdoc/pr_3849.prdoc @@ -0,0 +1,13 @@ +title: Unrequest a pre-image when it failed to execute + +doc: + - audience: Runtime User + description: | + When a referendum finishes, the proposal will be scheduled. When it is scheduled, + the pre-image is requested. The pre-image is unrequested after the proposal + is executed. However, if the proposal failed to execute, it wasn't unrequested. + Thus, it could not be removed from the on-chain state. This issue is now solved + by ensuring that the pre-image is unrequested when the proposal fails to execute. + +crates: + - name: pallet-scheduler diff --git a/substrate/frame/scheduler/src/lib.rs b/substrate/frame/scheduler/src/lib.rs index 62417b8d2cc..a53742679e0 100644 --- a/substrate/frame/scheduler/src/lib.rs +++ b/substrate/frame/scheduler/src/lib.rs @@ -1267,6 +1267,17 @@ impl Pallet { id: task.maybe_id, }); + // It was not available when we needed it, so we don't need to have requested it + // anymore. + T::Preimages::drop(&task.call); + + // We don't know why `peek` failed, thus we must account here for the "full weight". + let _ = weight.try_consume(T::WeightInfo::service_task( + task.call.lookup_len().map(|x| x as usize), + task.maybe_id.is_some(), + task.maybe_periodic.is_some(), + )); + return Err((Unavailable, Some(task))) }, }; diff --git a/substrate/frame/scheduler/src/tests.rs b/substrate/frame/scheduler/src/tests.rs index bb02320ad75..44035533639 100644 --- a/substrate/frame/scheduler/src/tests.rs +++ b/substrate/frame/scheduler/src/tests.rs @@ -3008,6 +3008,8 @@ fn unavailable_call_is_detected() { // Ensure the preimage isn't available assert!(!Preimage::have(&bound)); + // But we have requested it + assert!(Preimage::is_requested(&hash)); // Executes in block 4. run_to_block(4); @@ -3016,5 +3018,7 @@ fn unavailable_call_is_detected() { System::events().last().unwrap().event, crate::Event::CallUnavailable { task: (4, 0), id: Some(name) }.into() ); + // It should not be requested anymore.
+ assert!(!Preimage::is_requested(&hash)); }); } -- GitLab From 5d314eb03ed03d9030bb38b3d2e205f2f5c266ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 27 Mar 2024 23:52:50 +0000 Subject: [PATCH 048/128] pallet-referenda: Detect incorrect pre-image length (#3850) There has been a case where a referendum failed because the length given to `submit` was incorrect. The pallet can actually check the length if the pre-image already exists, to ensure that this kind of issue does not happen again. --- prdoc/pr_3850.prdoc | 15 +++++++++++++++ substrate/frame/referenda/src/lib.rs | 12 ++++++++++++ substrate/frame/referenda/src/tests.rs | 16 ++++++++++++++++ 3 files changed, 43 insertions(+) create mode 100644 prdoc/pr_3850.prdoc diff --git a/prdoc/pr_3850.prdoc b/prdoc/pr_3850.prdoc new file mode 100644 index 00000000000..8f7ce16076e --- /dev/null +++ b/prdoc/pr_3850.prdoc @@ -0,0 +1,15 @@ +title: Detect incorrect pre-image length when submitting a referendum + +doc: + - audience: Runtime User + description: | + When submitting a referendum, the `proposal` is passed as an argument. + The `proposal` is most of the time a reference to a `pre-image`, + which also contains the length of the `pre-image`. This pull request + adds some logic to check that, if the `pre-image` already exists, + the length passed in matches the stored one. This prevents a + referendum from becoming unexecutable because of a mismatch of + this length. + +crates: + - name: pallet-referenda diff --git a/substrate/frame/referenda/src/lib.rs b/substrate/frame/referenda/src/lib.rs index e616056c302..fbe27e1a478 100644 --- a/substrate/frame/referenda/src/lib.rs +++ b/substrate/frame/referenda/src/lib.rs @@ -424,6 +424,8 @@ pub mod pallet { BadStatus, /// The preimage does not exist. PreimageNotExist, + /// The preimage is stored with a different length than the one provided. + PreimageStoredWithDifferentLength, } #[pallet::hooks] @@ -462,6 +464,16 @@ pub mod pallet { let proposal_origin = *proposal_origin; let who = T::SubmitOrigin::ensure_origin(origin, &proposal_origin)?; + // If the pre-image is already stored, ensure that it has the same length as given in + // `proposal`.
+ if let (Some(preimage_len), Some(proposal_len)) = + (proposal.lookup_hash().and_then(|h| T::Preimages::len(&h)), proposal.lookup_len()) + { + if preimage_len != proposal_len { + return Err(Error::::PreimageStoredWithDifferentLength.into()) + } + } + let track = T::Tracks::track_for(&proposal_origin).map_err(|_| Error::::NoTrack)?; let submission_deposit = Self::take_deposit(who, T::SubmissionDeposit::get())?; diff --git a/substrate/frame/referenda/src/tests.rs b/substrate/frame/referenda/src/tests.rs index 8f51136de0b..52251fcbdbe 100644 --- a/substrate/frame/referenda/src/tests.rs +++ b/substrate/frame/referenda/src/tests.rs @@ -666,3 +666,19 @@ fn clear_metadata_works() { })); }); } + +#[test] +fn detects_incorrect_len() { + ExtBuilder::default().build_and_execute(|| { + let hash = note_preimage(1); + assert_noop!( + Referenda::submit( + RuntimeOrigin::signed(1), + Box::new(RawOrigin::Root.into()), + frame_support::traits::Bounded::Lookup { hash, len: 3 }, + DispatchTime::At(1), + ), + Error::::PreimageStoredWithDifferentLength + ); + }); +} -- GitLab From daf04f01823a64dc734c79344314e378e9588546 Mon Sep 17 00:00:00 2001 From: Tin Chung <56880684+chungquantin@users.noreply.github.com> Date: Thu, 28 Mar 2024 14:56:23 +0700 Subject: [PATCH 049/128] Deprecate scheduler traits v1 and v2 (#3718) This PR adds the `#[deprecated]` attribute to v1 and v2 of the `schedule` traits. Proposed in this issue: https://github.com/paritytech/polkadot-sdk/issues/3676 ```rust #[allow(deprecated)] #[deprecated = "traits::schedule::v1 is deprecated. Please use v3 instead."] pub mod v1 { ... } #[allow(deprecated)] #[deprecated = "traits::schedule::v2 is deprecated. Please use v3 instead."] pub mod v2 { ... } ``` polkadot address: 19nSqFQorfF2HxD3oBzWM3oCh4SaCRKWt1yvmgaPYGCo71J --------- Co-authored-by: Oliver Tale-Yazdi Co-authored-by: Liam Aharon --- prdoc/pr_3718.prdoc | 13 +++++++++++++ substrate/frame/scheduler/src/lib.rs | 3 +++ substrate/frame/support/src/traits/schedule.rs | 18 ++++++++++++++++-- 3 files changed, 32 insertions(+), 2 deletions(-) create mode 100644 prdoc/pr_3718.prdoc diff --git a/prdoc/pr_3718.prdoc b/prdoc/pr_3718.prdoc new file mode 100644 index 00000000000..b2b24cc9704 --- /dev/null +++ b/prdoc/pr_3718.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Deprecate scheduler traits v1 and v2 + +doc: + - audience: Runtime Dev + description: | + Add `#[deprecated]` attribute to scheduler traits v1 and v2 to deprecate old versions + +crates: + - name: frame-support + - name: pallet-scheduler diff --git a/substrate/frame/scheduler/src/lib.rs b/substrate/frame/scheduler/src/lib.rs index a53742679e0..d19a1e0001d 100644 --- a/substrate/frame/scheduler/src/lib.rs +++ b/substrate/frame/scheduler/src/lib.rs @@ -1446,6 +1446,7 @@ impl Pallet { } } +#[allow(deprecated)] impl schedule::v2::Anon, ::RuntimeCall, T::PalletsOrigin> for Pallet { @@ -1480,6 +1481,8 @@ impl schedule::v2::Anon, ::RuntimeCall } } +// TODO: migrate `schedule::v2::Anon` to `v3` +#[allow(deprecated)] impl schedule::v2::Named, ::RuntimeCall, T::PalletsOrigin> for Pallet { diff --git a/substrate/frame/support/src/traits/schedule.rs b/substrate/frame/support/src/traits/schedule.rs index 7a7d1357da1..f41c73fe69a 100644 --- a/substrate/frame/support/src/traits/schedule.rs +++ b/substrate/frame/support/src/traits/schedule.rs @@ -130,7 +130,7 @@ impl MaybeHashed { } } -// TODO: deprecate +#[deprecated(note
= "Use `v3` instead. Will be removed after September 2024.")] pub mod v1 { use super::*; @@ -218,10 +218,12 @@ pub mod v1 { fn next_dispatch_time(id: Vec) -> Result; } + #[allow(deprecated)] impl Anon for T where T: v2::Anon, { + #[allow(deprecated)] type Address = T::Address; fn schedule( @@ -232,10 +234,13 @@ pub mod v1 { call: Call, ) -> Result { let c = MaybeHashed::::Value(call); + + #[allow(deprecated)] T::schedule(when, maybe_periodic, priority, origin, c) } fn cancel(address: Self::Address) -> Result<(), ()> { + #[allow(deprecated)] T::cancel(address) } @@ -243,18 +248,22 @@ pub mod v1 { address: Self::Address, when: DispatchTime, ) -> Result { + #[allow(deprecated)] T::reschedule(address, when) } fn next_dispatch_time(address: Self::Address) -> Result { + #[allow(deprecated)] T::next_dispatch_time(address) } } + #[allow(deprecated)] impl Named for T where T: v2::Named, { + #[allow(deprecated)] type Address = T::Address; fn schedule_named( @@ -266,10 +275,12 @@ pub mod v1 { call: Call, ) -> Result { let c = MaybeHashed::::Value(call); + #[allow(deprecated)] T::schedule_named(id, when, maybe_periodic, priority, origin, c) } fn cancel_named(id: Vec) -> Result<(), ()> { + #[allow(deprecated)] T::cancel_named(id) } @@ -277,16 +288,18 @@ pub mod v1 { id: Vec, when: DispatchTime, ) -> Result { + #[allow(deprecated)] T::reschedule_named(id, when) } fn next_dispatch_time(id: Vec) -> Result { + #[allow(deprecated)] T::next_dispatch_time(id) } } } -// TODO: deprecate +#[deprecated(note = "Use `v3` instead. Will be removed after September 2024.")] pub mod v2 { use super::*; @@ -478,4 +491,5 @@ pub mod v3 { } } +#[allow(deprecated)] pub use v1::*; -- GitLab From 1ed44af368cc06c67c0b58b5d8a6ae65d6c80030 Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Thu, 28 Mar 2024 19:01:37 +1100 Subject: [PATCH 050/128] [prdoc] Require SemVer bump level (#3816) A prerequisite for adding a stable branch and respecting SemVer on new stable releases is including SemVer bump levels in our PRDocs. Next release is scheduled for April 3rd, so it would be great to get this merged before then. Also added "None" as a valid bump option, to support test/benchmark changes and CI to ensure changed crates have an entry. --- prdoc/schema_user.json | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/prdoc/schema_user.json b/prdoc/schema_user.json index 1bd0b3b93ee..593113cf326 100644 --- a/prdoc/schema_user.json +++ b/prdoc/schema_user.json @@ -132,7 +132,8 @@ } }, "required": [ - "name" + "name", + "bump" ] }, "migration_db": { @@ -187,6 +188,11 @@ "const": "patch", "title": "Patch", "description": "A bump to the third leftmost non-zero digit of the version number." + }, + { + "const": "none", + "title": "None", + "description": "This change requires no SemVer bump (e.g. change was a test or benchmark)." } ] }, -- GitLab From 60846a081fac6ddb17957606e8ff2efb1d4606ed Mon Sep 17 00:00:00 2001 From: Alessandro Siniscalchi Date: Thu, 28 Mar 2024 10:12:37 +0100 Subject: [PATCH 051/128] [parachain-template] runtime API Implementations into `mod apis` (#3817) This PR significantly refactors the runtime API implementations to improve project structure, maintainability, and readability. Key changes include: 1. **Enhancing Visibility**: Adjusts the visibility of `RUNTIME_API_VERSIONS` in `impl_runtime_apis.rs` to `pub`, making it accessible throughout the runtime module. 2. **Centralizing API Implementations**: Introduces a new file, `apis.rs`, within the parachain template's runtime directory. 3. 
**Streamlining `lib.rs`**: Updates the main runtime library file to reflect these structural changes. It removes redundant API implementations and points `VERSION` to the newly exposed `RUNTIME_API_VERSIONS` from `apis.rs`, simplifying the overall runtime configuration. ### Motivations Behind the Refactoring: - **Improved Project Structure**: Centralizing API implementations in `apis.rs` offers a clearer, more navigable project structure. - **Better Readability**: Streamlining `lib.rs` and reducing clutter enhance readability, making it easier for new contributors to understand the project layout and logic. ### Summary of Changes: - Made `RUNTIME_API_VERSIONS` public in `impl_runtime_apis.rs`. - Added `apis.rs` to centralize runtime API implementations. - Streamlined `lib.rs` to adjust to the refactored project structure. --- prdoc/pr_3817.prdoc | 23 ++ .../api/proc-macro/src/impl_runtime_apis.rs | 2 +- templates/parachain/node/src/service.rs | 4 +- templates/parachain/runtime/src/apis.rs | 275 ++++++++++++++++++ templates/parachain/runtime/src/lib.rs | 237 +-------------- 5 files changed, 305 insertions(+), 236 deletions(-) create mode 100644 prdoc/pr_3817.prdoc create mode 100644 templates/parachain/runtime/src/apis.rs diff --git a/prdoc/pr_3817.prdoc b/prdoc/pr_3817.prdoc new file mode 100644 index 00000000000..bf9d397122f --- /dev/null +++ b/prdoc/pr_3817.prdoc @@ -0,0 +1,23 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Parachain Runtime API Implementations into mod apis Refactoring + +doc: + - audience: Runtime Dev + description: | + This PR introduces a refactoring to the runtime API implementations within the parachain template project. The primary changes include enhancing the visibility of `RUNTIME_API_VERSIONS` to `pub` in `impl_runtime_apis.rs`, centralizing API implementations in a new `apis.rs` file, and streamlining `lib.rs`. These changes aim to improve project structure, maintainability, and readability. + + Key Changes: + - `RUNTIME_API_VERSIONS` is now publicly accessible, enhancing module-wide visibility. + - Introduction of `apis.rs` centralizes runtime API implementations, promoting a cleaner and more navigable project structure. + - The main runtime library file, `lib.rs`, has been updated to reflect these structural changes, removing redundant API implementations and simplifying runtime configuration by pointing `VERSION` to the newly exposed `RUNTIME_API_VERSIONS` from `apis.rs`. + + Motivations: + - **Improved Project Structure**: Centralizing API implementations offers a more organized and understandable project layout. + - **Enhanced Readability**: The refactoring efforts aim to declutter `lib.rs`, facilitating easier comprehension for new contributors. 
+ +crates: + - name: sp-api-proc-macro + - name: parachain-template-node + - name: parachain-template-runtime diff --git a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs index b7e5600a017..87a381fd7bf 100644 --- a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -797,7 +797,7 @@ fn generate_runtime_api_versions(impls: &[ItemImpl]) -> Result { } Ok(quote!( - const RUNTIME_API_VERSIONS: #c::ApisVec = #c::create_apis_vec!([ #( #result ),* ]); + pub const RUNTIME_API_VERSIONS: #c::ApisVec = #c::create_apis_vec!([ #( #result ),* ]); #( #sections )* )) diff --git a/templates/parachain/node/src/service.rs b/templates/parachain/node/src/service.rs index 4dd24803e9b..bb4a5394958 100644 --- a/templates/parachain/node/src/service.rs +++ b/templates/parachain/node/src/service.rs @@ -6,8 +6,8 @@ use std::{sync::Arc, time::Duration}; use cumulus_client_cli::CollatorOptions; // Local Runtime Types use parachain_template_runtime::{ + apis::RuntimeApi, opaque::{Block, Hash}, - RuntimeApi, }; // Cumulus Imports @@ -46,7 +46,7 @@ impl sc_executor::NativeExecutionDispatch for ParachainNativeExecutor { ); fn dispatch(method: &str, data: &[u8]) -> Option> { - parachain_template_runtime::api::dispatch(method, data) + parachain_template_runtime::apis::api::dispatch(method, data) } fn native_version() -> sc_executor::NativeVersion { diff --git a/templates/parachain/runtime/src/apis.rs b/templates/parachain/runtime/src/apis.rs new file mode 100644 index 00000000000..aa0cae843c3 --- /dev/null +++ b/templates/parachain/runtime/src/apis.rs @@ -0,0 +1,275 @@ +// This is free and unencumbered software released into the public domain. +// +// Anyone is free to copy, modify, publish, use, compile, sell, or +// distribute this software, either in source code form or as a compiled +// binary, for any purpose, commercial or non-commercial, and by any +// means. +// +// In jurisdictions that recognize copyright laws, the author or authors +// of this software dedicate any and all copyright interest in the +// software to the public domain. We make this dedication for the benefit +// of the public at large and to the detriment of our heirs and +// successors. We intend this dedication to be an overt act of +// relinquishment in perpetuity of all present and future rights to this +// software under copyright law. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +// IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +// OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +// OTHER DEALINGS IN THE SOFTWARE. 
+// +// For more information, please refer to + +// External crates imports +use frame_support::{ + genesis_builder_helper::{build_config, create_default_config}, + weights::Weight, +}; +use pallet_aura::Authorities; +use sp_api::impl_runtime_apis; +use sp_consensus_aura::sr25519::AuthorityId as AuraId; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; +use sp_runtime::{ + traits::Block as BlockT, + transaction_validity::{TransactionSource, TransactionValidity}, + ApplyExtrinsicResult, +}; +use sp_std::prelude::Vec; +use sp_version::RuntimeVersion; + +// Local module imports +use super::{ + AccountId, Aura, Balance, Block, Executive, InherentDataExt, Nonce, ParachainSystem, Runtime, + RuntimeCall, RuntimeGenesisConfig, SessionKeys, System, TransactionPayment, VERSION, +}; + +impl_runtime_apis! { + impl sp_consensus_aura::AuraApi for Runtime { + fn slot_duration() -> sp_consensus_aura::SlotDuration { + sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) + } + + fn authorities() -> Vec { + Authorities::::get().into_inner() + } + } + + impl sp_api::Core for Runtime { + fn version() -> RuntimeVersion { + VERSION + } + + fn execute_block(block: Block) { + Executive::execute_block(block) + } + + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { + Executive::initialize_block(header) + } + } + + impl sp_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + OpaqueMetadata::new(Runtime::metadata().into()) + } + + fn metadata_at_version(version: u32) -> Option { + Runtime::metadata_at_version(version) + } + + fn metadata_versions() -> sp_std::vec::Vec { + Runtime::metadata_versions() + } + } + + impl sp_block_builder::BlockBuilder for Runtime { + fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { + Executive::apply_extrinsic(extrinsic) + } + + fn finalize_block() -> ::Header { + Executive::finalize_block() + } + + fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { + data.create_extrinsics() + } + + fn check_inherents( + block: Block, + data: sp_inherents::InherentData, + ) -> sp_inherents::CheckInherentsResult { + data.check_extrinsics(&block) + } + } + + impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { + fn validate_transaction( + source: TransactionSource, + tx: ::Extrinsic, + block_hash: ::Hash, + ) -> TransactionValidity { + Executive::validate_transaction(source, tx, block_hash) + } + } + + impl sp_offchain::OffchainWorkerApi for Runtime { + fn offchain_worker(header: &::Header) { + Executive::offchain_worker(header) + } + } + + impl sp_session::SessionKeys for Runtime { + fn generate_session_keys(seed: Option>) -> Vec { + SessionKeys::generate(seed) + } + + fn decode_session_keys( + encoded: Vec, + ) -> Option, KeyTypeId)>> { + SessionKeys::decode_into_raw_public_keys(&encoded) + } + } + + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { + fn account_nonce(account: AccountId) -> Nonce { + System::account_nonce(account) + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi for Runtime { + fn query_info( + uxt: ::Extrinsic, + len: u32, + ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo { + TransactionPayment::query_info(uxt, len) + } + fn query_fee_details( + uxt: ::Extrinsic, + len: u32, + ) -> pallet_transaction_payment::FeeDetails { + TransactionPayment::query_fee_details(uxt, len) + } + fn query_weight_to_fee(weight: Weight) -> Balance { + TransactionPayment::weight_to_fee(weight) + } + fn 
query_length_to_fee(length: u32) -> Balance { + TransactionPayment::length_to_fee(length) + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi + for Runtime + { + fn query_call_info( + call: RuntimeCall, + len: u32, + ) -> pallet_transaction_payment::RuntimeDispatchInfo { + TransactionPayment::query_call_info(call, len) + } + fn query_call_fee_details( + call: RuntimeCall, + len: u32, + ) -> pallet_transaction_payment::FeeDetails { + TransactionPayment::query_call_fee_details(call, len) + } + fn query_weight_to_fee(weight: Weight) -> Balance { + TransactionPayment::weight_to_fee(weight) + } + fn query_length_to_fee(length: u32) -> Balance { + TransactionPayment::length_to_fee(length) + } + } + + impl cumulus_primitives_core::CollectCollationInfo for Runtime { + fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { + ParachainSystem::collect_collation_info(header) + } + } + + #[cfg(feature = "try-runtime")] + impl frame_try_runtime::TryRuntime for Runtime { + fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { + use super::RuntimeBlockWeights; + + let weight = Executive::try_runtime_upgrade(checks).unwrap(); + (weight, RuntimeBlockWeights::get().max_block) + } + + fn execute_block( + block: Block, + state_root_check: bool, + signature_check: bool, + select: frame_try_runtime::TryStateSelect, + ) -> Weight { + // NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to + // have a backtrace here. + Executive::try_execute_block(block, state_root_check, signature_check, select).unwrap() + } + } + + #[cfg(feature = "runtime-benchmarks")] + impl frame_benchmarking::Benchmark for Runtime { + fn benchmark_metadata(extra: bool) -> ( + Vec, + Vec, + ) { + use frame_benchmarking::{Benchmarking, BenchmarkList}; + use frame_support::traits::StorageInfoTrait; + use frame_system_benchmarking::Pallet as SystemBench; + use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use super::*; + + let mut list = Vec::::new(); + list_benchmarks!(list, extra); + + let storage_info = AllPalletsWithSystem::storage_info(); + (list, storage_info) + } + + fn dispatch_benchmark( + config: frame_benchmarking::BenchmarkConfig + ) -> Result, sp_runtime::RuntimeString> { + use frame_benchmarking::{BenchmarkError, Benchmarking, BenchmarkBatch}; + use super::*; + + use frame_system_benchmarking::Pallet as SystemBench; + impl frame_system_benchmarking::Config for Runtime { + fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { + ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); + Ok(()) + } + + fn verify_set_code() { + System::assert_last_event(cumulus_pallet_parachain_system::Event::::ValidationFunctionStored.into()); + } + } + + use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + impl cumulus_pallet_session_benchmarking::Config for Runtime {} + + use frame_support::traits::WhitelistedStorageKeys; + let whitelist = AllPalletsWithSystem::whitelisted_storage_keys(); + + let mut batches = Vec::::new(); + let params = (&config, &whitelist); + add_benchmarks!(params, batches); + + if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) } + Ok(batches) + } + } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + create_default_config::() + } + + fn build_config(config: Vec) -> sp_genesis_builder::Result { + build_config::(config) + } + } +} diff 
--git a/templates/parachain/runtime/src/lib.rs b/templates/parachain/runtime/src/lib.rs index 88b0f5a1474..5cfee123b01 100644 --- a/templates/parachain/runtime/src/lib.rs +++ b/templates/parachain/runtime/src/lib.rs @@ -6,19 +6,17 @@ #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); +pub mod apis; mod weights; pub mod xcm_config; use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; use polkadot_runtime_common::xcm_sender::NoPriceForMessageDelivery; use smallvec::smallvec; -use sp_api::impl_runtime_apis; -use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{BlakeTwo256, Block as BlockT, IdentifyAccount, Verify}, - transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, MultiSignature, + traits::{BlakeTwo256, IdentifyAccount, Verify}, + MultiSignature, }; use sp_std::prelude::*; @@ -30,7 +28,6 @@ use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; use frame_support::{ construct_runtime, derive_impl, dispatch::DispatchClass, - genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, TransformOrigin}, weights::{ @@ -57,8 +54,6 @@ use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; -use pallet_aura::Authorities; - // XCM Imports use xcm::latest::prelude::BodyId; @@ -187,7 +182,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { authoring_version: 1, spec_version: 1, impl_version: 0, - apis: RUNTIME_API_VERSIONS, + apis: apis::RUNTIME_API_VERSIONS, transaction_version: 1, state_version: 1, }; @@ -543,230 +538,6 @@ mod benches { ); } -impl_runtime_apis! 
{ - impl sp_consensus_aura::AuraApi for Runtime { - fn slot_duration() -> sp_consensus_aura::SlotDuration { - sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) - } - - fn authorities() -> Vec { - Authorities::::get().into_inner() - } - } - - impl sp_api::Core for Runtime { - fn version() -> RuntimeVersion { - VERSION - } - - fn execute_block(block: Block) { - Executive::execute_block(block) - } - - fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { - Executive::initialize_block(header) - } - } - - impl sp_api::Metadata for Runtime { - fn metadata() -> OpaqueMetadata { - OpaqueMetadata::new(Runtime::metadata().into()) - } - - fn metadata_at_version(version: u32) -> Option { - Runtime::metadata_at_version(version) - } - - fn metadata_versions() -> sp_std::vec::Vec { - Runtime::metadata_versions() - } - } - - impl sp_block_builder::BlockBuilder for Runtime { - fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { - Executive::apply_extrinsic(extrinsic) - } - - fn finalize_block() -> ::Header { - Executive::finalize_block() - } - - fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { - data.create_extrinsics() - } - - fn check_inherents( - block: Block, - data: sp_inherents::InherentData, - ) -> sp_inherents::CheckInherentsResult { - data.check_extrinsics(&block) - } - } - - impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { - fn validate_transaction( - source: TransactionSource, - tx: ::Extrinsic, - block_hash: ::Hash, - ) -> TransactionValidity { - Executive::validate_transaction(source, tx, block_hash) - } - } - - impl sp_offchain::OffchainWorkerApi for Runtime { - fn offchain_worker(header: &::Header) { - Executive::offchain_worker(header) - } - } - - impl sp_session::SessionKeys for Runtime { - fn generate_session_keys(seed: Option>) -> Vec { - SessionKeys::generate(seed) - } - - fn decode_session_keys( - encoded: Vec, - ) -> Option, KeyTypeId)>> { - SessionKeys::decode_into_raw_public_keys(&encoded) - } - } - - impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { - fn account_nonce(account: AccountId) -> Nonce { - System::account_nonce(account) - } - } - - impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi for Runtime { - fn query_info( - uxt: ::Extrinsic, - len: u32, - ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo { - TransactionPayment::query_info(uxt, len) - } - fn query_fee_details( - uxt: ::Extrinsic, - len: u32, - ) -> pallet_transaction_payment::FeeDetails { - TransactionPayment::query_fee_details(uxt, len) - } - fn query_weight_to_fee(weight: Weight) -> Balance { - TransactionPayment::weight_to_fee(weight) - } - fn query_length_to_fee(length: u32) -> Balance { - TransactionPayment::length_to_fee(length) - } - } - - impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi - for Runtime - { - fn query_call_info( - call: RuntimeCall, - len: u32, - ) -> pallet_transaction_payment::RuntimeDispatchInfo { - TransactionPayment::query_call_info(call, len) - } - fn query_call_fee_details( - call: RuntimeCall, - len: u32, - ) -> pallet_transaction_payment::FeeDetails { - TransactionPayment::query_call_fee_details(call, len) - } - fn query_weight_to_fee(weight: Weight) -> Balance { - TransactionPayment::weight_to_fee(weight) - } - fn query_length_to_fee(length: u32) -> Balance { - TransactionPayment::length_to_fee(length) - } - } - - impl cumulus_primitives_core::CollectCollationInfo for Runtime { - fn 
collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { - ParachainSystem::collect_collation_info(header) - } - } - - #[cfg(feature = "try-runtime")] - impl frame_try_runtime::TryRuntime for Runtime { - fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { - let weight = Executive::try_runtime_upgrade(checks).unwrap(); - (weight, RuntimeBlockWeights::get().max_block) - } - - fn execute_block( - block: Block, - state_root_check: bool, - signature_check: bool, - select: frame_try_runtime::TryStateSelect, - ) -> Weight { - // NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to - // have a backtrace here. - Executive::try_execute_block(block, state_root_check, signature_check, select).unwrap() - } - } - - #[cfg(feature = "runtime-benchmarks")] - impl frame_benchmarking::Benchmark for Runtime { - fn benchmark_metadata(extra: bool) -> ( - Vec, - Vec, - ) { - use frame_benchmarking::{Benchmarking, BenchmarkList}; - use frame_support::traits::StorageInfoTrait; - use frame_system_benchmarking::Pallet as SystemBench; - use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - - let mut list = Vec::::new(); - list_benchmarks!(list, extra); - - let storage_info = AllPalletsWithSystem::storage_info(); - (list, storage_info) - } - - fn dispatch_benchmark( - config: frame_benchmarking::BenchmarkConfig - ) -> Result, sp_runtime::RuntimeString> { - use frame_benchmarking::{BenchmarkError, Benchmarking, BenchmarkBatch}; - - use frame_system_benchmarking::Pallet as SystemBench; - impl frame_system_benchmarking::Config for Runtime { - fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { - ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); - Ok(()) - } - - fn verify_set_code() { - System::assert_last_event(cumulus_pallet_parachain_system::Event::::ValidationFunctionStored.into()); - } - } - - use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - impl cumulus_pallet_session_benchmarking::Config for Runtime {} - - use frame_support::traits::WhitelistedStorageKeys; - let whitelist = AllPalletsWithSystem::whitelisted_storage_keys(); - - let mut batches = Vec::::new(); - let params = (&config, &whitelist); - add_benchmarks!(params, batches); - - if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) } - Ok(batches) - } - } - - impl sp_genesis_builder::GenesisBuilder for Runtime { - fn create_default_config() -> Vec { - create_default_config::() - } - - fn build_config(config: Vec) -> sp_genesis_builder::Result { - build_config::(config) - } - } -} - cumulus_pallet_parachain_system::register_validate_block! { Runtime = Runtime, BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, -- GitLab From 78b1cab9e8064557b2ebab9d3d039ab448aacd72 Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Thu, 28 Mar 2024 11:40:06 +0100 Subject: [PATCH 052/128] Contracts: add test builders (#3796) Cleanup tests (-2.7k lines !) 
using some builder patterns to build pallet_contracts api calls --- Cargo.lock | 1 + substrate/frame/contracts/Cargo.toml | 1 + substrate/frame/contracts/src/tests.rs | 2729 ++++------------- .../frame/contracts/src/tests/builder.rs | 219 ++ 4 files changed, 733 insertions(+), 2217 deletions(-) create mode 100644 substrate/frame/contracts/src/tests/builder.rs diff --git a/Cargo.lock b/Cargo.lock index 2382dc8d162..8188e571fcd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9553,6 +9553,7 @@ dependencies = [ "pallet-timestamp", "pallet-utility", "parity-scale-codec", + "paste", "pretty_assertions", "rand", "rand_pcg", diff --git a/substrate/frame/contracts/Cargo.toml b/substrate/frame/contracts/Cargo.toml index be3bafcd23f..2aa37a2bf21 100644 --- a/substrate/frame/contracts/Cargo.toml +++ b/substrate/frame/contracts/Cargo.toml @@ -18,6 +18,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +paste = { version = "1.0", default-features = false } bitflags = "1.3" codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", diff --git a/substrate/frame/contracts/src/tests.rs b/substrate/frame/contracts/src/tests.rs index ed486fc4a67..a51faa88c41 100644 --- a/substrate/frame/contracts/src/tests.rs +++ b/substrate/frame/contracts/src/tests.rs @@ -15,6 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +mod builder; mod pallet_dummy; mod test_debug; @@ -610,7 +611,7 @@ fn calling_plain_account_fails() { let base_cost = <::WeightInfo as WeightInfo>::call(); assert_eq!( - Contracts::call(RuntimeOrigin::signed(ALICE), BOB, 0, GAS_LIMIT, None, Vec::new()), + builder::call(BOB).build(), Err(DispatchErrorWithPostInfo { error: Error::::ContractNotFound.into(), post_info: PostDispatchInfo { @@ -668,32 +669,13 @@ fn migration_in_progress_works() { Contracts::set_code(RuntimeOrigin::signed(ALICE), BOB.clone(), code_hash), Error::::MigrationInProgress, ); + assert_err_ignore_postinfo!(builder::call(BOB).build(), Error::::MigrationInProgress); assert_err_ignore_postinfo!( - Contracts::call(RuntimeOrigin::signed(ALICE), BOB, 0, GAS_LIMIT, None, vec![],), + builder::instantiate_with_code(wasm).value(100_000).build(), Error::::MigrationInProgress, ); assert_err_ignore_postinfo!( - Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), - 100_000, - GAS_LIMIT, - None, - wasm, - vec![], - vec![], - ), - Error::::MigrationInProgress, - ); - assert_err_ignore_postinfo!( - Contracts::instantiate( - RuntimeOrigin::signed(ALICE), - 100_000, - GAS_LIMIT, - None, - code_hash, - vec![], - vec![], - ), + builder::instantiate(code_hash).value(100_000).build(), Error::::MigrationInProgress, ); }); @@ -721,20 +703,9 @@ fn instantiate_and_call_and_deposit_event() { initialize_block(2); // Check at the end to get hash on error easily - let addr = Contracts::bare_instantiate( - ALICE, - value, - GAS_LIMIT, - None, - Code::Existing(code_hash), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Existing(code_hash)) + .value(value) + .build_and_unwrap_account_id(); assert!(ContractInfoOf::::contains_key(&addr)); assert_eq!( @@ -812,41 +783,21 @@ fn deposit_event_max_value_limit() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { // Create let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let addr = Contracts::bare_instantiate( - ALICE, - 30_000, - GAS_LIMIT, - None, - 
Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(30_000) + .build_and_unwrap_account_id(); // Call contract with allowed storage value. - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT.set_ref_time(GAS_LIMIT.ref_time() * 2), // we are copying a huge buffer, - None, - ::Schedule::get().limits.payload_len.encode(), - )); + assert_ok!(builder::call(addr.clone()) + .gas_limit(GAS_LIMIT.set_ref_time(GAS_LIMIT.ref_time() * 2)) // we are copying a huge buffer, + .data(::Schedule::get().limits.payload_len.encode()) + .build()); // Call contract with too large a storage value. assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr, - 0, - GAS_LIMIT, - None, - (::Schedule::get().limits.payload_len + 1).encode(), - ), + builder::call(addr) + .data((::Schedule::get().limits.payload_len + 1).encode()) + .build(), Error::::ValueTooLarge, ); }); @@ -860,32 +811,16 @@ fn run_out_of_fuel_engine() { let min_balance = Contracts::min_balance(); let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let addr = Contracts::bare_instantiate( - ALICE, - 100 * min_balance, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(100 * min_balance) + .build_and_unwrap_account_id(); // Call the contract with a fixed gas limit. It must run out of gas because it just // loops forever. assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr, // newly created account - 0, - Weight::from_parts(1_000_000_000_000, u64::MAX), - None, - vec![], - ), + builder::call(addr) + .gas_limit(Weight::from_parts(1_000_000_000_000, u64::MAX)) + .build(), Error::::OutOfGas, ); }); @@ -899,36 +834,18 @@ fn run_out_of_fuel_host() { let min_balance = Contracts::min_balance(); let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let addr = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(code), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(code)) + .value(min_balance * 100) + .build_and_unwrap_account_id(); let gas_limit = Weight::from_parts(u32::MAX as u64, GAS_LIMIT.proof_size()); // Use chain extension to charge more ref_time than it is available. 
- let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - gas_limit, - None, - ExtensionInput { extension_id: 0, func_id: 2, extra: &u32::MAX.encode() }.into(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result; + let result = builder::bare_call(addr.clone()) + .gas_limit(gas_limit) + .data(ExtensionInput { extension_id: 0, func_id: 2, extra: &u32::MAX.encode() }.into()) + .build() + .result; assert_err!(result, >::OutOfGas); }); } @@ -938,63 +855,20 @@ fn gas_syncs_work() { let (code, _code_hash) = compile_module::("caller_is_origin_n").unwrap(); ExtBuilder::default().existential_deposit(200).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(code), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_account_id(); - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - 0u32.encode(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ); + let result = builder::bare_call(addr.clone()).data(0u32.encode()).build(); assert_ok!(result.result); let engine_consumed_noop = result.gas_consumed.ref_time(); - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - 1u32.encode(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ); + let result = builder::bare_call(addr.clone()).data(1u32.encode()).build(); assert_ok!(result.result); let gas_consumed_once = result.gas_consumed.ref_time(); let host_consumed_once = ::Schedule::get().host_fn_weights.caller_is_origin.ref_time(); let engine_consumed_once = gas_consumed_once - host_consumed_once - engine_consumed_noop; - let result = Contracts::bare_call( - ALICE, - addr, - 0, - GAS_LIMIT, - None, - 2u32.encode(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ); + let result = builder::bare_call(addr).data(2u32.encode()).build(); assert_ok!(result.result); let gas_consumed_twice = result.gas_consumed.ref_time(); let host_consumed_twice = host_consumed_once * 2; @@ -1018,56 +892,21 @@ fn instantiate_unique_trie_id() { .unwrap(); // Instantiate the contract and store its trie id for later comparison. - let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Existing(code_hash), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = + builder::bare_instantiate(Code::Existing(code_hash)).build_and_unwrap_account_id(); let trie_id = get_contract(&addr).trie_id; // Try to instantiate it again without termination should yield an error. assert_err_ignore_postinfo!( - Contracts::instantiate( - RuntimeOrigin::signed(ALICE), - 0, - GAS_LIMIT, - None, - code_hash, - vec![], - vec![], - ), + builder::instantiate(code_hash).build(), >::DuplicateContract, ); // Terminate the contract. - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![] - )); + assert_ok!(builder::call(addr.clone()).build()); // Re-Instantiate after termination. 
- assert_ok!(Contracts::instantiate( - RuntimeOrigin::signed(ALICE), - 0, - GAS_LIMIT, - None, - code_hash, - vec![], - vec![], - )); + assert_ok!(builder::instantiate(code_hash).build()); // Trie ids shouldn't match or we might have a collision assert_ne!(trie_id, get_contract(&addr).trie_id); @@ -1081,42 +920,22 @@ fn storage_max_value_limit() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { // Create let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let addr = Contracts::bare_instantiate( - ALICE, - 30_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(30_000) + .build_and_unwrap_account_id(); get_contract(&addr); // Call contract with allowed storage value. - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT.set_ref_time(GAS_LIMIT.ref_time() * 2), // we are copying a huge buffer - None, - ::Schedule::get().limits.payload_len.encode(), - )); + assert_ok!(builder::call(addr.clone()) + .gas_limit(GAS_LIMIT.set_ref_time(GAS_LIMIT.ref_time() * 2)) // we are copying a huge buffer + .data(::Schedule::get().limits.payload_len.encode()) + .build()); // Call contract with too large a storage value. assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr, - 0, - GAS_LIMIT, - None, - (::Schedule::get().limits.payload_len + 1).encode(), - ), + builder::call(addr) + .data((::Schedule::get().limits.payload_len + 1).encode()) + .build(), Error::::ValueTooLarge, ); }); @@ -1132,20 +951,9 @@ fn deploy_and_call_other_contract() { // Create let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let caller_addr = Contracts::bare_instantiate( - ALICE, - 100_000, - GAS_LIMIT, - None, - Code::Upload(caller_wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let caller_addr = builder::bare_instantiate(Code::Upload(caller_wasm)) + .value(100_000) + .build_and_unwrap_account_id(); Contracts::bare_upload_code(ALICE, callee_wasm, None, Determinism::Enforced).unwrap(); let callee_addr = Contracts::contract_address( @@ -1160,14 +968,9 @@ fn deploy_and_call_other_contract() { // Call BOB contract, which attempts to instantiate and call the callee contract and // makes various assertions on the results from those calls. 
- assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - caller_addr.clone(), - 0, - GAS_LIMIT, - None, - callee_code_hash.as_ref().to_vec(), - )); + assert_ok!(builder::call(caller_addr.clone()) + .data(callee_code_hash.as_ref().to_vec()) + .build()); assert_eq!( System::events(), @@ -1266,20 +1069,9 @@ fn delegate_call() { let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Instantiate the 'caller' - let caller_addr = Contracts::bare_instantiate( - ALICE, - 300_000, - GAS_LIMIT, - None, - Code::Upload(caller_wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let caller_addr = builder::bare_instantiate(Code::Upload(caller_wasm)) + .value(300_000) + .build_and_unwrap_account_id(); // Only upload 'callee' code assert_ok!(Contracts::upload_code( RuntimeOrigin::signed(ALICE), @@ -1288,14 +1080,10 @@ fn delegate_call() { Determinism::Enforced, )); - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - caller_addr.clone(), - 1337, - GAS_LIMIT, - None, - callee_code_hash.as_ref().to_vec(), - )); + assert_ok!(builder::call(caller_addr.clone()) + .value(1337) + .data(callee_code_hash.as_ref().to_vec()) + .build()); }); } @@ -1306,20 +1094,9 @@ fn transfer_expendable_cannot_kill_account() { let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Instantiate the BOB contract. - let addr = Contracts::bare_instantiate( - ALICE, - 1_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(1_000) + .build_and_unwrap_account_id(); // Check that the BOB contract has been instantiated. get_contract(&addr); @@ -1355,34 +1132,16 @@ fn cannot_self_destruct_through_draining() { let min_balance = Contracts::min_balance(); // Instantiate the BOB contract. - let addr = Contracts::bare_instantiate( - ALICE, - value, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(value) + .build_and_unwrap_account_id(); // Check that the BOB contract has been instantiated. get_contract(&addr); // Call BOB which makes it send all funds to the zero address // The contract code asserts that the transfer fails with the correct error code - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![] - )); + assert_ok!(builder::call(addr.clone()).build()); // Make sure the account wasn't remove by sending all free balance away. assert_eq!( @@ -1400,20 +1159,7 @@ fn cannot_self_destruct_through_storage_refund_after_price_change() { let min_balance = Contracts::min_balance(); // Instantiate the BOB contract. 
-		let addr = Contracts::bare_instantiate(
-			ALICE,
-			0,
-			GAS_LIMIT,
-			None,
-			Code::Upload(wasm),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr = builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_account_id();
 
 		let info_deposit = test_utils::contract_info_storage_deposit(&addr);
 
@@ -1423,27 +1169,13 @@ fn cannot_self_destruct_through_storage_refund_after_price_change() {
 		assert_eq!(<Test as Config>::Currency::total_balance(&addr), info_deposit + min_balance);
 
 		// Create 100 bytes of storage with a price of 1 per byte and a single storage item of price 2
-		assert_ok!(Contracts::call(
-			RuntimeOrigin::signed(ALICE),
-			addr.clone(),
-			0,
-			GAS_LIMIT,
-			None,
-			100u32.to_le_bytes().to_vec()
-		));
+		assert_ok!(builder::call(addr.clone()).data(100u32.to_le_bytes().to_vec()).build());
 		assert_eq!(get_contract(&addr).total_deposit(), info_deposit + 102);
 
 		// Increase the byte price and trigger a refund. This should not have any influence because
 		// the removal is pro rata and exactly those 100 bytes should have been removed.
 		DEPOSIT_PER_BYTE.with(|c| *c.borrow_mut() = 500);
-		assert_ok!(Contracts::call(
-			RuntimeOrigin::signed(ALICE),
-			addr.clone(),
-			0,
-			GAS_LIMIT,
-			None,
-			0u32.to_le_bytes().to_vec()
-		));
+		assert_ok!(builder::call(addr.clone()).data(0u32.to_le_bytes().to_vec()).build());
 
 		// Make sure the account wasn't removed by the refund
 		assert_eq!(
@@ -1461,20 +1193,9 @@ fn cannot_self_destruct_while_live() {
 		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1_000_000);
 
 		// Instantiate the BOB contract.
-		let addr = Contracts::bare_instantiate(
-			ALICE,
-			100_000,
-			GAS_LIMIT,
-			None,
-			Code::Upload(wasm),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr = builder::bare_instantiate(Code::Upload(wasm))
+			.value(100_000)
+			.build_and_unwrap_account_id();
 
 		// Check that the BOB contract has been instantiated.
 		get_contract(&addr);
@@ -1482,14 +1203,7 @@ fn cannot_self_destruct_while_live() {
 		// Call BOB with input data, forcing it to make a recursive call to itself to
 		// self-destruct, resulting in a trap.
 		assert_err_ignore_postinfo!(
-			Contracts::call(
-				RuntimeOrigin::signed(ALICE),
-				addr.clone(),
-				0,
-				GAS_LIMIT,
-				None,
-				vec![0],
-			),
+			builder::call(addr.clone()).data(vec![0]).build(),
 			Error::<Test>::ContractTrapped,
 		);
 
@@ -1507,20 +1221,9 @@ fn self_destruct_works() {
 		let min_balance = Contracts::min_balance();
 
 		// Instantiate the BOB contract.
-		let addr = Contracts::bare_instantiate(
-			ALICE,
-			100_000,
-			GAS_LIMIT,
-			None,
-			Code::Upload(wasm),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr = builder::bare_instantiate(Code::Upload(wasm))
+			.value(100_000)
+			.build_and_unwrap_account_id();
 
 		// Check that the BOB contract has been instantiated.
 		let _ = get_contract(&addr);
@@ -1531,10 +1234,7 @@ fn self_destruct_works() {
 		initialize_block(2);
 
 		// Call BOB without input data which triggers termination.
-		assert_matches!(
-			Contracts::call(RuntimeOrigin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, None, vec![],),
-			Ok(_)
-		);
+		assert_matches!(builder::call(addr.clone()).build(), Ok(_));
 
 		// Check that code is still there but refcount dropped to zero.
 		assert_refcount!(&code_hash, 0);
@@ -1621,20 +1321,10 @@ fn destroy_contract_and_transfer_funds() {
 
 		// This deploys the BOB contract, which in turn deploys the CHARLIE contract during
 		// construction.
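Every hunk in this file applies the same transformation: a nine-argument positional call such as `Contracts::bare_instantiate(ALICE, value, GAS_LIMIT, None, code, data, salt, DebugInfo::Skip, CollectEvents::Skip)` collapses into a chained builder that only names the parameters that differ from the defaults. The test-helper builder itself is not part of this excerpt; the following is a minimal, self-contained sketch of the shape the call sites imply, with `BareInstantiateBuilder`, its field types, and its defaults (ALICE origin, zero value, empty data and salt) assumed for illustration rather than taken from the crate.

// A minimal sketch, not the crate's actual helper: names and defaults are assumed.
type AccountId = u64;
type Balance = u64;
const ALICE: AccountId = 1;

struct BareInstantiateBuilder {
	origin: AccountId,
	value: Balance,
	data: Vec<u8>,
	salt: Vec<u8>,
}

impl BareInstantiateBuilder {
	// Defaults mirror the arguments the removed positional calls spelled out.
	fn new() -> Self {
		Self { origin: ALICE, value: 0, data: vec![], salt: vec![] }
	}
	fn value(mut self, value: Balance) -> Self { self.value = value; self }
	fn data(mut self, data: Vec<u8>) -> Self { self.data = data; self }
	fn salt(mut self, salt: Vec<u8>) -> Self { self.salt = salt; self }
	// Stand-in for dispatching into the pallet and unwrapping the result;
	// the real helper returns `result.unwrap().account_id`.
	fn build_and_unwrap_account_id(self) -> AccountId {
		let _ = (self.origin, self.value, self.data, self.salt);
		42 // dummy address for the sketch
	}
}

fn main() {
	// Usage matches the new test style: only non-default knobs are named.
	let addr = BareInstantiateBuilder::new().value(30_000).build_and_unwrap_account_id();
	assert_eq!(addr, 42);
}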
-		let addr_bob = Contracts::bare_instantiate(
-			ALICE,
-			200_000,
-			GAS_LIMIT,
-			None,
-			Code::Upload(caller_wasm),
-			callee_code_hash.as_ref().to_vec(),
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr_bob = builder::bare_instantiate(Code::Upload(caller_wasm))
+			.value(200_000)
+			.data(callee_code_hash.as_ref().to_vec())
+			.build_and_unwrap_account_id();
 
 		// Check that the CHARLIE contract has been instantiated.
 		let addr_charlie =
@@ -1642,14 +1332,7 @@ fn destroy_contract_and_transfer_funds() {
 		get_contract(&addr_charlie);
 
 		// Call BOB, which calls CHARLIE, forcing CHARLIE to self-destruct.
-		assert_ok!(Contracts::call(
-			RuntimeOrigin::signed(ALICE),
-			addr_bob,
-			0,
-			GAS_LIMIT,
-			None,
-			addr_charlie.encode(),
-		));
+		assert_ok!(builder::call(addr_bob).data(addr_charlie.encode()).build());
 
 		// Check that CHARLIE has moved on to the great beyond (i.e. died).
 		assert!(get_contract_checked(&addr_charlie).is_none());
@@ -1664,15 +1347,7 @@ fn cannot_self_destruct_in_constructor() {
 
 		// Fail to instantiate the BOB because the constructor calls seal_terminate.
 		assert_err_ignore_postinfo!(
-			Contracts::instantiate_with_code(
-				RuntimeOrigin::signed(ALICE),
-				100_000,
-				GAS_LIMIT,
-				None,
-				wasm,
-				vec![],
-				vec![],
-			),
+			builder::instantiate_with_code(wasm).value(100_000).build(),
 			Error::<Test>::TerminatedInConstructor,
 		);
 	});
@@ -1686,20 +1361,9 @@ fn crypto_hashes() {
 		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1_000_000);
 
 		// Instantiate the CRYPTO_HASHES contract.
-		let addr = Contracts::bare_instantiate(
-			ALICE,
-			100_000,
-			GAS_LIMIT,
-			None,
-			Code::Upload(wasm),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr = builder::bare_instantiate(Code::Upload(wasm))
+			.value(100_000)
+			.build_and_unwrap_account_id();
 		// Perform the call.
 		let input = b"_DEAD_BEEF";
 		use sp_io::hashing::*;
@@ -1748,36 +1412,13 @@ fn transfer_return_code() {
 		let min_balance = Contracts::min_balance();
 		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1000 * min_balance);
 
-		let addr = Contracts::bare_instantiate(
-			ALICE,
-			min_balance * 100,
-			GAS_LIMIT,
-			None,
-			Code::Upload(wasm),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr = builder::bare_instantiate(Code::Upload(wasm))
+			.value(min_balance * 100)
+			.build_and_unwrap_account_id();
 
 		// Contract has only the minimal balance so any transfer will fail.
 		<Test as Config>::Currency::set_balance(&addr, min_balance);
-		let result = Contracts::bare_call(
-			ALICE,
-			addr.clone(),
-			0,
-			GAS_LIMIT,
-			None,
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-			Determinism::Enforced,
-		)
-		.result
-		.unwrap();
+		let result = builder::bare_call(addr.clone()).build_and_unwrap_result();
 		assert_return_code!(result, RuntimeReturnCode::TransferFailed);
 	});
 }
@@ -1791,113 +1432,60 @@ fn call_return_code() {
 		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1000 * min_balance);
 		let _ = <Test as Config>::Currency::set_balance(&CHARLIE, 1000 * min_balance);
 
-		let addr_bob = Contracts::bare_instantiate(
-			ALICE,
-			min_balance * 100,
-			GAS_LIMIT,
-			None,
-			Code::Upload(caller_code),
-			vec![0],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr_bob = builder::bare_instantiate(Code::Upload(caller_code))
+			.value(min_balance * 100)
+			.data(vec![0])
+			.build_and_unwrap_account_id();
 		<Test as Config>::Currency::set_balance(&addr_bob, min_balance);
 
 		// Contract calls into Django which is not a valid contract
-		let result = Contracts::bare_call(
-			ALICE,
-			addr_bob.clone(),
-			0,
-			GAS_LIMIT,
-			None,
-			AsRef::<[u8]>::as_ref(&DJANGO).to_vec(),
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-			Determinism::Enforced,
-		)
-		.result
-		.unwrap();
+		let result = builder::bare_call(addr_bob.clone())
+			.data(AsRef::<[u8]>::as_ref(&DJANGO).to_vec())
+			.build_and_unwrap_result();
 		assert_return_code!(result, RuntimeReturnCode::NotCallable);
 
-		let addr_django = Contracts::bare_instantiate(
-			CHARLIE,
-			min_balance * 100,
-			GAS_LIMIT,
-			None,
-			Code::Upload(callee_code),
-			vec![0],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr_django = builder::bare_instantiate(Code::Upload(callee_code))
+			.origin(CHARLIE)
+			.value(min_balance * 100)
+			.data(vec![0])
+			.build_and_unwrap_account_id();
 		<Test as Config>::Currency::set_balance(&addr_django, min_balance);
 
 		// Contract has only the minimal balance so any transfer will fail.
-		let result = Contracts::bare_call(
-			ALICE,
-			addr_bob.clone(),
-			0,
-			GAS_LIMIT,
-			None,
-			AsRef::<[u8]>::as_ref(&addr_django)
-				.iter()
-				.chain(&0u32.to_le_bytes())
-				.cloned()
-				.collect(),
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-			Determinism::Enforced,
-		)
-		.result
-		.unwrap();
+		let result = builder::bare_call(addr_bob.clone())
+			.data(
+				AsRef::<[u8]>::as_ref(&addr_django)
+					.iter()
+					.chain(&0u32.to_le_bytes())
+					.cloned()
+					.collect(),
+			)
+			.build_and_unwrap_result();
 		assert_return_code!(result, RuntimeReturnCode::TransferFailed);
 
 		// Contract has enough balance but callee reverts because "1" is passed.
 		<Test as Config>::Currency::set_balance(&addr_bob, min_balance + 1000);
-		let result = Contracts::bare_call(
-			ALICE,
-			addr_bob.clone(),
-			0,
-			GAS_LIMIT,
-			None,
-			AsRef::<[u8]>::as_ref(&addr_django)
-				.iter()
-				.chain(&1u32.to_le_bytes())
-				.cloned()
-				.collect(),
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-			Determinism::Enforced,
-		)
-		.result
-		.unwrap();
+		let result = builder::bare_call(addr_bob.clone())
+			.data(
+				AsRef::<[u8]>::as_ref(&addr_django)
+					.iter()
+					.chain(&1u32.to_le_bytes())
+					.cloned()
+					.collect(),
+			)
+			.build_and_unwrap_result();
 		assert_return_code!(result, RuntimeReturnCode::CalleeReverted);
 
 		// Contract has enough balance but callee traps because "2" is passed.
-		let result = Contracts::bare_call(
-			ALICE,
-			addr_bob,
-			0,
-			GAS_LIMIT,
-			None,
-			AsRef::<[u8]>::as_ref(&addr_django)
-				.iter()
-				.chain(&2u32.to_le_bytes())
-				.cloned()
-				.collect(),
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-			Determinism::Enforced,
-		)
-		.result
-		.unwrap();
+		let result = builder::bare_call(addr_bob)
+			.data(
+				AsRef::<[u8]>::as_ref(&addr_django)
+					.iter()
+					.chain(&2u32.to_le_bytes())
+					.cloned()
+					.collect(),
+			)
+			.build_and_unwrap_result();
 		assert_return_code!(result, RuntimeReturnCode::CalleeTrapped);
 	});
 }
@@ -1912,95 +1500,34 @@ fn instantiate_return_code() {
 		let _ = <Test as Config>::Currency::set_balance(&CHARLIE, 1000 * min_balance);
 		let callee_hash = callee_hash.as_ref().to_vec();
 
-		assert_ok!(Contracts::instantiate_with_code(
-			RuntimeOrigin::signed(ALICE),
-			min_balance * 100,
-			GAS_LIMIT,
-			None,
-			callee_code,
-			vec![],
-			vec![],
-		));
+		assert_ok!(builder::instantiate_with_code(callee_code).value(min_balance * 100).build());
 
-		let addr = Contracts::bare_instantiate(
-			ALICE,
-			min_balance * 100,
-			GAS_LIMIT,
-			None,
-			Code::Upload(caller_code),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr = builder::bare_instantiate(Code::Upload(caller_code))
+			.value(min_balance * 100)
+			.build_and_unwrap_account_id();
 
 		// Contract has only the minimal balance so any transfer will fail.
 		<Test as Config>::Currency::set_balance(&addr, min_balance);
-		let result = Contracts::bare_call(
-			ALICE,
-			addr.clone(),
-			0,
-			GAS_LIMIT,
-			None,
-			callee_hash.clone(),
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-			Determinism::Enforced,
-		)
-		.result
-		.unwrap();
+		let result = builder::bare_call(addr.clone())
+			.data(callee_hash.clone())
+			.build_and_unwrap_result();
 		assert_return_code!(result, RuntimeReturnCode::TransferFailed);
 
 		// Contract has enough balance but the passed code hash is invalid
 		<Test as Config>::Currency::set_balance(&addr, min_balance + 10_000);
-		let result = Contracts::bare_call(
-			ALICE,
-			addr.clone(),
-			0,
-			GAS_LIMIT,
-			None,
-			vec![0; 33],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-			Determinism::Enforced,
-		)
-		.result
-		.unwrap();
+		let result = builder::bare_call(addr.clone()).data(vec![0; 33]).build_and_unwrap_result();
 		assert_return_code!(result, RuntimeReturnCode::CodeNotFound);
 
 		// Contract has enough balance but callee reverts because "1" is passed.
-		let result = Contracts::bare_call(
-			ALICE,
-			addr.clone(),
-			0,
-			GAS_LIMIT,
-			None,
-			callee_hash.iter().chain(&1u32.to_le_bytes()).cloned().collect(),
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-			Determinism::Enforced,
-		)
-		.result
-		.unwrap();
+		let result = builder::bare_call(addr.clone())
+			.data(callee_hash.iter().chain(&1u32.to_le_bytes()).cloned().collect())
+			.build_and_unwrap_result();
 		assert_return_code!(result, RuntimeReturnCode::CalleeReverted);
 
 		// Contract has enough balance but callee traps because "2" is passed.
-		let result = Contracts::bare_call(
-			ALICE,
-			addr,
-			0,
-			GAS_LIMIT,
-			None,
-			callee_hash.iter().chain(&2u32.to_le_bytes()).cloned().collect(),
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-			Determinism::Enforced,
-		)
-		.result
-		.unwrap();
+		let result = builder::bare_call(addr)
+			.data(callee_hash.iter().chain(&2u32.to_le_bytes()).cloned().collect())
+			.build_and_unwrap_result();
 		assert_return_code!(result, RuntimeReturnCode::CalleeTrapped);
 	});
 }
@@ -2013,15 +1540,7 @@ fn disabled_chain_extension_wont_deploy() {
 		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1000 * min_balance);
 		TestExtension::disable();
 		assert_err_ignore_postinfo!(
-			Contracts::instantiate_with_code(
-				RuntimeOrigin::signed(ALICE),
-				3 * min_balance,
-				GAS_LIMIT,
-				None,
-				code,
-				vec![],
-				vec![],
-			),
+			builder::instantiate_with_code(code).value(3 * min_balance).build(),
 			<Error<Test>>::CodeRejected,
 		);
 	});
@@ -2033,23 +1552,12 @@ fn disabled_chain_extension_errors_on_call() {
 	ExtBuilder::default().existential_deposit(50).build().execute_with(|| {
 		let min_balance = Contracts::min_balance();
 		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1000 * min_balance);
-		let addr = Contracts::bare_instantiate(
-			ALICE,
-			min_balance * 100,
-			GAS_LIMIT,
-			None,
-			Code::Upload(code),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr = builder::bare_instantiate(Code::Upload(code))
+			.value(min_balance * 100)
+			.build_and_unwrap_account_id();
 		TestExtension::disable();
 		assert_err_ignore_postinfo!(
-			Contracts::call(RuntimeOrigin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, None, vec![],),
+			builder::call(addr.clone()).build(),
 			Error::<Test>::CodeRejected,
 		);
 	});
@@ -2061,141 +1569,61 @@ fn chain_extension_works() {
 	ExtBuilder::default().existential_deposit(50).build().execute_with(|| {
 		let min_balance = Contracts::min_balance();
 		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1000 * min_balance);
-		let addr = Contracts::bare_instantiate(
-			ALICE,
-			min_balance * 100,
-			GAS_LIMIT,
-			None,
-			Code::Upload(code),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr = builder::bare_instantiate(Code::Upload(code))
+			.value(min_balance * 100)
+			.build_and_unwrap_account_id();
 
 		// 0 = read input buffer and pass it through as output
		let input: Vec<u8> = ExtensionInput { extension_id: 0, func_id: 0, extra: &[99] }.into();
-		let result = Contracts::bare_call(
-			ALICE,
-			addr.clone(),
-			0,
-			GAS_LIMIT,
-			None,
-			input.clone(),
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-			Determinism::Enforced,
-		);
+		let result = builder::bare_call(addr.clone()).data(input.clone()).build();
 		assert_eq!(TestExtension::last_seen_buffer(), input);
 		assert_eq!(result.result.unwrap().data, input);
 
 		// 1 = treat inputs as integer primitives and store the supplied integers
-		Contracts::bare_call(
-			ALICE,
-			addr.clone(),
-			0,
-			GAS_LIMIT,
-			None,
-			ExtensionInput { extension_id: 0, func_id: 1, extra: &[] }.into(),
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-			Determinism::Enforced,
-		)
-		.result
-		.unwrap();
+		builder::bare_call(addr.clone())
+			.data(ExtensionInput { extension_id: 0, func_id: 1, extra: &[] }.into())
+			.build_and_unwrap_result();
 		assert_eq!(TestExtension::last_seen_input_len(), 4);
 
 		// 2 = charge some extra weight (amount supplied in the fifth byte)
-		let result = Contracts::bare_call(
-			ALICE,
-			addr.clone(),
-			0,
-			GAS_LIMIT,
-			None,
-			ExtensionInput { extension_id: 0, func_id: 2, extra: &0u32.encode() }.into(),
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-			Determinism::Enforced,
-		);
+		let result = builder::bare_call(addr.clone())
+			.data(ExtensionInput { extension_id: 0, func_id: 2, extra: &0u32.encode() }.into())
+			.build();
 		assert_ok!(result.result);
 		let gas_consumed = result.gas_consumed;
-		let result = Contracts::bare_call(
-			ALICE,
-			addr.clone(),
-			0,
-			GAS_LIMIT,
-			None,
-			ExtensionInput { extension_id: 0, func_id: 2, extra: &42u32.encode() }.into(),
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-			Determinism::Enforced,
-		);
+		let result = builder::bare_call(addr.clone())
+			.data(ExtensionInput { extension_id: 0, func_id: 2, extra: &42u32.encode() }.into())
+			.build();
 		assert_ok!(result.result);
 		assert_eq!(result.gas_consumed.ref_time(), gas_consumed.ref_time() + 42);
-		let result = Contracts::bare_call(
-			ALICE,
-			addr.clone(),
-			0,
-			GAS_LIMIT,
-			None,
-			ExtensionInput { extension_id: 0, func_id: 2, extra: &95u32.encode() }.into(),
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-			Determinism::Enforced,
-		);
+		let result = builder::bare_call(addr.clone())
+			.data(ExtensionInput { extension_id: 0, func_id: 2, extra: &95u32.encode() }.into())
+			.build();
 		assert_ok!(result.result);
 		assert_eq!(result.gas_consumed.ref_time(), gas_consumed.ref_time() + 95);
 
 		// 3 = diverging chain extension call that sets flags to 0x1 and returns a fixed buffer
-		let result = Contracts::bare_call(
-			ALICE,
-			addr.clone(),
-			0,
-			GAS_LIMIT,
-			None,
-			ExtensionInput { extension_id: 0, func_id: 3, extra: &[] }.into(),
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-			Determinism::Enforced,
-		)
-		.result
-		.unwrap();
+		let result = builder::bare_call(addr.clone())
+			.data(ExtensionInput { extension_id: 0, func_id: 3, extra: &[] }.into())
+			.build_and_unwrap_result();
 		assert_eq!(result.flags, ReturnFlags::REVERT);
 		assert_eq!(result.data, vec![42, 99]);
 
 		// diverging to second chain extension that sets flags to 0x1 and returns a fixed buffer
 		// We set the MSB part to 1 (instead of 0) which routes the request into the second
 		// extension
-		let result = Contracts::bare_call(
-			ALICE,
-			addr.clone(),
-			0,
-			GAS_LIMIT,
-			None,
-			ExtensionInput { extension_id: 1, func_id: 0, extra: &[] }.into(),
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-			Determinism::Enforced,
-		)
-		.result
-		.unwrap();
+		let result = builder::bare_call(addr.clone())
+			.data(ExtensionInput { extension_id: 1, func_id: 0, extra: &[] }.into())
+			.build_and_unwrap_result();
 		assert_eq!(result.flags, ReturnFlags::REVERT);
 		assert_eq!(result.data, vec![0x4B, 0x1D]);
 
 		// Diverging to third chain extension that is disabled
 		// We set the MSB part to 2 (instead of 0) which routes the request into the third extension
 		assert_err_ignore_postinfo!(
-			Contracts::call(
-				RuntimeOrigin::signed(ALICE),
-				addr.clone(),
-				0,
-				GAS_LIMIT,
-				None,
-				ExtensionInput { extension_id: 2, func_id: 0, extra: &[] }.into(),
-			),
+			builder::call(addr.clone())
+				.data(ExtensionInput { extension_id: 2, func_id: 0, extra: &[] }.into())
+				.build(),
			Error::<Test>::NoChainExtension,
 		);
 	});
}
@@ -2207,20 +1635,9 @@ fn chain_extension_temp_storage_works() {
 	ExtBuilder::default().existential_deposit(50).build().execute_with(|| {
 		let min_balance = Contracts::min_balance();
 		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1000 * min_balance);
-		let addr = Contracts::bare_instantiate(
-			ALICE,
-			min_balance * 100,
-			GAS_LIMIT,
-			None,
-			Code::Upload(code),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr = builder::bare_instantiate(Code::Upload(code))
+			.value(min_balance * 100)
+			.build_and_unwrap_account_id();
 
 		// Call func 0 and func 1 back to back.
 		let stop_recursion = 0u8;
		let mut input: Vec<u8> = ExtensionInput { extension_id: 3, func_id: 0, extra: &[] }.into();
 		input.extend_from_slice(
			ExtensionInput { extension_id: 3, func_id: 1, extra: &[stop_recursion] }
				.to_vec()
				.as_ref(),
 		);
 
-		assert_ok!(
-			Contracts::bare_call(
-				ALICE,
-				addr.clone(),
-				0,
-				GAS_LIMIT,
-				None,
-				input.clone(),
-				DebugInfo::Skip,
-				CollectEvents::Skip,
-				Determinism::Enforced,
-			)
-			.result
-		);
+		assert_ok!(builder::bare_call(addr.clone()).data(input.clone()).build().result);
 	})
}
 
@@ -2255,20 +1659,9 @@ fn lazy_removal_works() {
 		let min_balance = Contracts::min_balance();
 		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1000 * min_balance);
 
-		let addr = Contracts::bare_instantiate(
-			ALICE,
-			min_balance * 100,
-			GAS_LIMIT,
-			None,
-			Code::Upload(code),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr = builder::bare_instantiate(Code::Upload(code))
+			.value(min_balance * 100)
+			.build_and_unwrap_account_id();
 
 		let info = get_contract(&addr);
 		let trie = &info.child_trie_info();
 
 		child::put(trie, &[99], &42);
 
 		// Terminate the contract
-		assert_ok!(Contracts::call(
-			RuntimeOrigin::signed(ALICE),
-			addr.clone(),
-			0,
-			GAS_LIMIT,
-			None,
-			vec![]
-		));
+		assert_ok!(builder::call(addr.clone()).build());
 
 		// Contract info should be gone
		assert!(!<ContractInfoOf<Test>>::contains_key(&addr));
@@ -2309,20 +1695,10 @@ fn lazy_batch_removal_works() {
		let mut tries: Vec<child::ChildInfo> = vec![];
 
 		for i in 0..3u8 {
-			let addr = Contracts::bare_instantiate(
-				ALICE,
-				min_balance * 100,
-				GAS_LIMIT,
-				None,
-				Code::Upload(code.clone()),
-				vec![],
-				vec![i],
-				DebugInfo::Skip,
-				CollectEvents::Skip,
-			)
-			.result
-			.unwrap()
-			.account_id;
+			let addr = builder::bare_instantiate(Code::Upload(code.clone()))
+				.value(min_balance * 100)
+				.salt(vec![i])
+				.build_and_unwrap_account_id();
 
 			let info = get_contract(&addr);
 			let trie = &info.child_trie_info();
 
 			child::put(trie, &[99], &42);
 
 			// Terminate the contract. Contract info should be gone, but value should be still there
 			// as the lazy removal did not run, yet.
-			assert_ok!(Contracts::call(
-				RuntimeOrigin::signed(ALICE),
-				addr.clone(),
-				0,
-				GAS_LIMIT,
-				None,
-				vec![]
-			));
+			assert_ok!(builder::call(addr.clone()).build());
 
			assert!(!<ContractInfoOf<Test>>::contains_key(&addr));
 			assert_matches!(child::get(trie, &[99]), Some(42));
 
@@ -2375,20 +1744,9 @@ fn lazy_removal_partial_remove_works() {
 		let min_balance = Contracts::min_balance();
 		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1000 * min_balance);
 
-		let addr = Contracts::bare_instantiate(
-			ALICE,
-			min_balance * 100,
-			GAS_LIMIT,
-			None,
-			Code::Upload(code),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr = builder::bare_instantiate(Code::Upload(code))
+			.value(min_balance * 100)
+			.build_and_unwrap_account_id();
 
 		let info = get_contract(&addr);
 
@@ -2399,14 +1757,7 @@ fn lazy_removal_partial_remove_works() {
		<ContractInfoOf<Test>>::insert(&addr, info.clone());
 
 		// Terminate the contract
-		assert_ok!(Contracts::call(
-			RuntimeOrigin::signed(ALICE),
-			addr.clone(),
-			0,
-			GAS_LIMIT,
-			None,
-			vec![]
-		));
+		assert_ok!(builder::call(addr.clone()).build());
 
 		// Contract info should be gone
		assert!(!<ContractInfoOf<Test>>::contains_key(&addr));
 
@@ -2457,20 +1808,9 @@ fn lazy_removal_does_no_run_on_low_remaining_weight() {
 		let min_balance = Contracts::min_balance();
 		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1000 * min_balance);
 
-		let addr = Contracts::bare_instantiate(
-			ALICE,
-			min_balance * 100,
-			GAS_LIMIT,
-			None,
-			Code::Upload(code),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr = builder::bare_instantiate(Code::Upload(code))
+			.value(min_balance * 100)
+			.build_and_unwrap_account_id();
 
 		let info = get_contract(&addr);
 		let trie = &info.child_trie_info();
 
 		child::put(trie, &[99], &42);
 
 		// Terminate the contract
-		assert_ok!(Contracts::call(
-			RuntimeOrigin::signed(ALICE),
-			addr.clone(),
-			0,
-			GAS_LIMIT,
-			None,
-			vec![]
-		));
+		assert_ok!(builder::call(addr.clone()).build());
 
 		// Contract info should be gone
		assert!(!<ContractInfoOf<Test>>::contains_key(&addr));
 
@@ -2529,20 +1862,9 @@ fn lazy_removal_does_not_use_all_weight() {
 		let min_balance = Contracts::min_balance();
 		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1000 * min_balance);
 
-		let addr = Contracts::bare_instantiate(
-			ALICE,
-			min_balance * 100,
-			GAS_LIMIT,
-			None,
-			Code::Upload(code),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr = builder::bare_instantiate(Code::Upload(code))
+			.value(min_balance * 100)
+			.build_and_unwrap_account_id();
 
 		let info = get_contract(&addr);
		let (weight_per_key, max_keys) = ContractInfo::<Test>::deletion_budget(weight_limit);
 
		<ContractInfoOf<Test>>::insert(&addr, info.clone());
 
 		// Terminate the contract
-		assert_ok!(Contracts::call(
-			RuntimeOrigin::signed(ALICE),
-			addr.clone(),
-			0,
-			GAS_LIMIT,
-			None,
-			vec![]
-		));
+		assert_ok!(builder::call(addr.clone()).build());
 
 		// Contract info should be gone
		assert!(!<ContractInfoOf<Test>>::contains_key(&addr));
 
@@ -2620,20 +1935,10 @@ fn deletion_queue_ring_buffer_overflow() {
 
 		// add 3 contracts to the deletion queue
 		for i in 0..3u8 {
-			let addr = Contracts::bare_instantiate(
-				ALICE,
-				min_balance * 100,
-				GAS_LIMIT,
-				None,
-				Code::Upload(code.clone()),
-				vec![],
-				vec![i],
-				DebugInfo::Skip,
-				CollectEvents::Skip,
-			)
-			.result
-			.unwrap()
-			.account_id;
+			let addr = builder::bare_instantiate(Code::Upload(code.clone()))
+				.value(min_balance * 100)
+				.salt(vec![i])
+				.build_and_unwrap_account_id();
 
 			let info = get_contract(&addr);
 			let trie = &info.child_trie_info();
 
 			child::put(trie, &[99], &42);
 
 			// Terminate the contract. Contract info should be gone, but value should be still
 			// there as the lazy removal did not run, yet.
-			assert_ok!(Contracts::call(
-				RuntimeOrigin::signed(ALICE),
-				addr.clone(),
-				0,
-				GAS_LIMIT,
-				None,
-				vec![]
-			));
+			assert_ok!(builder::call(addr.clone()).build());
 
			assert!(!<ContractInfoOf<Test>>::contains_key(&addr));
 			assert_matches!(child::get(trie, &[99]), Some(42));
 
@@ -2678,87 +1976,36 @@ fn refcounter() {
 		let min_balance = Contracts::min_balance();
 
 		// Create two contracts with the same code and check that they do in fact share it.
-		let addr0 = Contracts::bare_instantiate(
-			ALICE,
-			min_balance * 100,
-			GAS_LIMIT,
-			None,
-			Code::Upload(wasm.clone()),
-			vec![],
-			vec![0],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
-		let addr1 = Contracts::bare_instantiate(
-			ALICE,
-			min_balance * 100,
-			GAS_LIMIT,
-			None,
-			Code::Upload(wasm.clone()),
-			vec![],
-			vec![1],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr0 = builder::bare_instantiate(Code::Upload(wasm.clone()))
+			.value(min_balance * 100)
+			.salt(vec![0])
+			.build_and_unwrap_account_id();
+		let addr1 = builder::bare_instantiate(Code::Upload(wasm.clone()))
+			.value(min_balance * 100)
+			.salt(vec![1])
+			.build_and_unwrap_account_id();
 		assert_refcount!(code_hash, 2);
 
 		// Sharing should also work with the usual instantiate call
-		let addr2 = Contracts::bare_instantiate(
-			ALICE,
-			min_balance * 100,
-			GAS_LIMIT,
-			None,
-			Code::Existing(code_hash),
-			vec![],
-			vec![2],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr2 = builder::bare_instantiate(Code::Existing(code_hash))
+			.value(min_balance * 100)
+			.salt(vec![2])
+			.build_and_unwrap_account_id();
 		assert_refcount!(code_hash, 3);
-
-		// Terminating one contract should decrement the refcount
-		assert_ok!(Contracts::call(
-			RuntimeOrigin::signed(ALICE),
-			addr0,
-			0,
-			GAS_LIMIT,
-			None,
-			vec![]
-		));
+
+		// Terminating one contract should decrement the refcount
+		assert_ok!(builder::call(addr0).build());
 		assert_refcount!(code_hash, 2);
 
 		// remove another one
-		assert_ok!(Contracts::call(
-			RuntimeOrigin::signed(ALICE),
-			addr1,
-			0,
-			GAS_LIMIT,
-			None,
-			vec![]
-		));
+		assert_ok!(builder::call(addr1).build());
 		assert_refcount!(code_hash, 1);
 
 		// Pristine code should still be there
		PristineCode::<Test>::get(code_hash).unwrap();
 
 		// remove the last contract
-		assert_ok!(Contracts::call(
-			RuntimeOrigin::signed(ALICE),
-			addr2,
-			0,
-			GAS_LIMIT,
-			None,
-			vec![]
-		));
+		assert_ok!(builder::call(addr2).build());
 		assert_refcount!(code_hash, 0);
 
 		// refcount is `0` but code should still exist because it needs to be removed manually
 
@@ -2772,31 +2019,10 @@ fn debug_message_works() {
 	ExtBuilder::default().existential_deposit(50).build().execute_with(|| {
 		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1_000_000);
-		let addr = Contracts::bare_instantiate(
-			ALICE,
-			30_000,
-			GAS_LIMIT,
-			None,
-			Code::Upload(wasm),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
-		let result = Contracts::bare_call(
-			ALICE,
-			addr,
-			0,
-			GAS_LIMIT,
-			None,
-			vec![],
-			DebugInfo::UnsafeDebug,
-			CollectEvents::Skip,
-			Determinism::Enforced,
-		);
+		let addr = builder::bare_instantiate(Code::Upload(wasm))
+			.value(30_000)
+			.build_and_unwrap_account_id();
+
+		let result = builder::bare_call(addr).debug(DebugInfo::UnsafeDebug).build();
 
 		assert_matches!(result.result, Ok(_));
 		assert_eq!(std::str::from_utf8(&result.debug_message).unwrap(), "Hello World!");
 	});
}
@@ -2809,32 +2035,11 @@ fn debug_message_logging_disabled() {
 	ExtBuilder::default().existential_deposit(50).build().execute_with(|| {
 		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1_000_000);
-		let addr = Contracts::bare_instantiate(
-			ALICE,
-			30_000,
-			GAS_LIMIT,
-			None,
-			Code::Upload(wasm),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr = builder::bare_instantiate(Code::Upload(wasm))
+			.value(30_000)
+			.build_and_unwrap_account_id();
 		// disable logging by passing `false`
-		let result = Contracts::bare_call(
-			ALICE,
-			addr.clone(),
-			0,
-			GAS_LIMIT,
-			None,
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-			Determinism::Enforced,
-		);
+		let result = builder::bare_call(addr.clone()).build();
 		assert_matches!(result.result, Ok(_));
 		// the dispatchables always run without debugging
 		assert_ok!(Contracts::call(RuntimeOrigin::signed(ALICE), addr, 0, GAS_LIMIT, None, vec![]));
@@ -2848,31 +2053,10 @@ fn debug_message_invalid_utf8() {
 	ExtBuilder::default().existential_deposit(50).build().execute_with(|| {
 		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1_000_000);
-		let addr = Contracts::bare_instantiate(
-			ALICE,
-			30_000,
-			GAS_LIMIT,
-			None,
-			Code::Upload(wasm),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
-		let result = Contracts::bare_call(
-			ALICE,
-			addr,
-			0,
-			GAS_LIMIT,
-			None,
-			vec![],
-			DebugInfo::UnsafeDebug,
-			CollectEvents::Skip,
-			Determinism::Enforced,
-		);
+		let addr = builder::bare_instantiate(Code::Upload(wasm))
+			.value(30_000)
+			.build_and_unwrap_account_id();
+		let result = builder::bare_call(addr).debug(DebugInfo::UnsafeDebug).build();
 		assert_ok!(result.result);
 		assert!(result.debug_message.is_empty());
 	});
}
@@ -2887,50 +2071,17 @@ fn gas_estimation_for_subcalls() {
 		let min_balance = Contracts::min_balance();
 		let _ = <Test as Config>::Currency::set_balance(&ALICE, 2_000 * min_balance);
 
-		let addr_caller = Contracts::bare_instantiate(
-			ALICE,
-			min_balance * 100,
-			GAS_LIMIT,
-			None,
-			Code::Upload(caller_code),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr_caller = builder::bare_instantiate(Code::Upload(caller_code))
+			.value(min_balance * 100)
+			.build_and_unwrap_account_id();
 
-		let addr_dummy = Contracts::bare_instantiate(
-			ALICE,
-			min_balance * 100,
-			GAS_LIMIT,
-			None,
-			Code::Upload(dummy_code),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr_dummy = builder::bare_instantiate(Code::Upload(dummy_code))
+			.value(min_balance * 100)
+			.build_and_unwrap_account_id();
 
-		let addr_call_runtime = Contracts::bare_instantiate(
-			ALICE,
-			min_balance * 100,
-			GAS_LIMIT,
-			None,
-			Code::Upload(call_runtime_code),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr_call_runtime = builder::bare_instantiate(Code::Upload(call_runtime_code))
+			.value(min_balance * 100)
+			.build_and_unwrap_account_id();
 
 		// Run the test for all of those weight limits for the subcall
 		let weights = [
@@ -2966,17 +2117,7 @@ fn gas_estimation_for_subcalls() {
 				.collect();
 
 			// Call in order to determine the gas that is required for this call
-			let result = Contracts::bare_call(
-				ALICE,
-				addr_caller.clone(),
-				0,
-				GAS_LIMIT,
-				None,
-				input.clone(),
-				DebugInfo::Skip,
-				CollectEvents::Skip,
-				Determinism::Enforced,
-			);
+			let result = builder::bare_call(addr_caller.clone()).data(input.clone()).build();
 			assert_ok!(&result.result);
 
 			// If the out of gas happens in the subcall the caller contract
@@ -2992,51 +2133,33 @@ fn gas_estimation_for_subcalls() {
 
 			// Make the same call using the estimated gas. Should succeed.
 			assert_ok!(
-				Contracts::bare_call(
-					ALICE,
-					addr_caller.clone(),
-					0,
-					result.gas_required,
-					Some(result.storage_deposit.charge_or_zero()),
-					input.clone(),
-					DebugInfo::Skip,
-					CollectEvents::Skip,
-					Determinism::Enforced,
-				)
-				.result
+				builder::bare_call(addr_caller.clone())
+					.gas_limit(result.gas_required)
+					.storage_deposit_limit(Some(result.storage_deposit.charge_or_zero()))
+					.data(input.clone())
+					.build()
+					.result
 			);
 
 			// Check that it fails with too little ref_time
 			assert_err!(
-				Contracts::bare_call(
-					ALICE,
-					addr_caller.clone(),
-					0,
-					result.gas_required.sub_ref_time(1),
-					Some(result.storage_deposit.charge_or_zero()),
-					input.clone(),
-					DebugInfo::Skip,
-					CollectEvents::Skip,
-					Determinism::Enforced,
-				)
-				.result,
+				builder::bare_call(addr_caller.clone())
+					.gas_limit(result.gas_required.sub_ref_time(1))
+					.storage_deposit_limit(Some(result.storage_deposit.charge_or_zero()))
+					.data(input.clone())
+					.build()
+					.result,
 				error,
 			);
 
 			// Check that it fails with too little proof_size
 			assert_err!(
-				Contracts::bare_call(
-					ALICE,
-					addr_caller.clone(),
-					0,
-					result.gas_required.sub_proof_size(1),
-					Some(result.storage_deposit.charge_or_zero()),
-					input,
-					DebugInfo::Skip,
-					CollectEvents::Skip,
-					Determinism::Enforced,
-				)
-				.result,
+				builder::bare_call(addr_caller.clone())
+					.gas_limit(result.gas_required.sub_proof_size(1))
+					.storage_deposit_limit(Some(result.storage_deposit.charge_or_zero()))
+					.data(input)
+					.build()
+					.result,
 				error,
 			);
 		}
 	});
}
@@ -3052,20 +2175,10 @@ fn gas_estimation_call_runtime() {
 		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1000 * min_balance);
 		let _ = <Test as Config>::Currency::set_balance(&CHARLIE, 1000 * min_balance);
 
-		let addr_caller = Contracts::bare_instantiate(
-			ALICE,
-			min_balance * 100,
-			GAS_LIMIT,
-			None,
-			Code::Upload(caller_code),
-			vec![],
-			vec![0],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr_caller = builder::bare_instantiate(Code::Upload(caller_code))
+			.value(min_balance * 100)
+			.salt(vec![0])
+			.build_and_unwrap_account_id();
 
 		// Call something trivial with a huge gas limit so that we can observe the effects
 		// of pre-charging. This should create a difference between consumed and required.
 		let call = RuntimeCall::Dummy(pallet_dummy::Call::overestimate_pre_charge {
 			pre_charge: Weight::from_parts(10_000_000, 1_000),
 			actual_weight: Weight::from_parts(100, 100),
 		});
-		let result = Contracts::bare_call(
-			ALICE,
-			addr_caller.clone(),
-			0,
-			GAS_LIMIT,
-			None,
-			call.encode(),
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-			Determinism::Enforced,
-		);
+		let result = builder::bare_call(addr_caller.clone()).data(call.encode()).build();
 		// The contract encodes the result of the runtime dispatch
 		let outcome = u32::decode(&mut result.result.unwrap().data.as_ref()).unwrap();
 		assert_eq!(outcome, 0);
 
 		// Make the same call using the required gas. Should succeed.
 		assert_ok!(
-			Contracts::bare_call(
-				ALICE,
-				addr_caller,
-				0,
-				result.gas_required,
-				None,
-				call.encode(),
-				DebugInfo::Skip,
-				CollectEvents::Skip,
-				Determinism::Enforced,
-			)
-			.result
+			builder::bare_call(addr_caller)
+				.gas_limit(result.gas_required)
+				.data(call.encode())
+				.build()
+				.result
 		);
 	});
}
@@ -3116,35 +2212,15 @@ fn call_runtime_reentrancy_guarded() {
 		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1000 * min_balance);
 		let _ = <Test as Config>::Currency::set_balance(&CHARLIE, 1000 * min_balance);
 
-		let addr_caller = Contracts::bare_instantiate(
-			ALICE,
-			min_balance * 100,
-			GAS_LIMIT,
-			None,
-			Code::Upload(caller_code),
-			vec![],
-			vec![0],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr_caller = builder::bare_instantiate(Code::Upload(caller_code))
+			.value(min_balance * 100)
+			.salt(vec![0])
+			.build_and_unwrap_account_id();
 
-		let addr_callee = Contracts::bare_instantiate(
-			ALICE,
-			min_balance * 100,
-			GAS_LIMIT,
-			None,
-			Code::Upload(callee_code),
-			vec![],
-			vec![1],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr_callee = builder::bare_instantiate(Code::Upload(callee_code))
+			.value(min_balance * 100)
+			.salt(vec![1])
+			.build_and_unwrap_account_id();
 
 		// Call pallet_contracts call() dispatchable
 		let call = RuntimeCall::Contracts(crate::Call::call {
			dest: addr_callee,
			value: 0,
			gas_limit: GAS_LIMIT / 3,
			storage_deposit_limit: None,
			data: vec![],
		});
 
 		// Call the runtime to re-enter the contracts engine by
 		// calling the dummy contract
-		let result = Contracts::bare_call(
-			ALICE,
-			addr_caller.clone(),
-			0,
-			GAS_LIMIT,
-			None,
-			call.encode(),
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-			Determinism::Enforced,
-		)
-		.result
-		.unwrap();
+		let result = builder::bare_call(addr_caller.clone())
+			.data(call.encode())
+			.build_and_unwrap_result();
 		// Call to runtime should fail because of the re-entrancy guard
 		assert_return_code!(result, RuntimeReturnCode::CallRuntimeFailed);
 	});
}
@@ -3183,20 +2249,9 @@ fn ecdsa_recover() {
 		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1_000_000);
 
 		// Instantiate the ecdsa_recover contract.
-		let addr = Contracts::bare_instantiate(
-			ALICE,
-			100_000,
-			GAS_LIMIT,
-			None,
-			Code::Upload(wasm),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr = builder::bare_instantiate(Code::Upload(wasm))
+			.value(100_000)
+			.build_and_unwrap_account_id();
 
 		#[rustfmt::skip]
 		let signature: [u8; 65] = [
@@ -3246,17 +2301,10 @@ fn bare_instantiate_returns_events() {
 		let min_balance = Contracts::min_balance();
 		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1000 * min_balance);
 
-		let result = Contracts::bare_instantiate(
-			ALICE,
-			min_balance * 100,
-			GAS_LIMIT,
-			None,
-			Code::Upload(wasm),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::UnsafeCollect,
-		);
+		let result = builder::bare_instantiate(Code::Upload(wasm))
+			.value(min_balance * 100)
+			.collect_events(CollectEvents::UnsafeCollect)
+			.build();
 
 		let events = result.events.unwrap();
 		assert!(!events.is_empty());
@@ -3271,17 +2319,7 @@ fn bare_instantiate_does_not_return_events() {
 		let min_balance = Contracts::min_balance();
 		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1000 * min_balance);
 
-		let result = Contracts::bare_instantiate(
-			ALICE,
-			min_balance * 100,
-			GAS_LIMIT,
-			None,
-			Code::Upload(wasm),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		);
+		let result = builder::bare_instantiate(Code::Upload(wasm)).value(min_balance * 100).build();
 
 		let events = result.events;
 		assert!(!System::events().is_empty());
@@ -3296,32 +2334,13 @@ fn bare_call_returns_events() {
 		let min_balance = Contracts::min_balance();
 		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1000 * min_balance);
 
-		let addr = Contracts::bare_instantiate(
-			ALICE,
-			min_balance * 100,
-			GAS_LIMIT,
-			None,
-			Code::Upload(wasm),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr = builder::bare_instantiate(Code::Upload(wasm))
+			.value(min_balance * 100)
+			.build_and_unwrap_account_id();
 
-		let result = Contracts::bare_call(
-			ALICE,
-			addr.clone(),
-			0,
-			GAS_LIMIT,
-			None,
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::UnsafeCollect,
-			Determinism::Enforced,
-		);
+		let result = builder::bare_call(addr.clone())
+			.collect_events(CollectEvents::UnsafeCollect)
+			.build();
 
 		let events = result.events.unwrap();
 		assert_return_code!(&result.result.unwrap(), RuntimeReturnCode::Success);
@@ -3337,32 +2356,11 @@ fn bare_call_does_not_return_events() {
 		let min_balance = Contracts::min_balance();
 		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1000 * min_balance);
 
-		let addr = Contracts::bare_instantiate(
-			ALICE,
-			min_balance * 100,
-			GAS_LIMIT,
-			None,
-			Code::Upload(wasm),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr = builder::bare_instantiate(Code::Upload(wasm))
+			.value(min_balance * 100)
+			.build_and_unwrap_account_id();
 
-		let result = Contracts::bare_call(
-			ALICE,
-			addr.clone(),
-			0,
-			GAS_LIMIT,
-			None,
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-			Determinism::Enforced,
-		);
+		let result = builder::bare_call(addr.clone()).build();
 
 		let events = result.events;
 		assert_return_code!(&result.result.unwrap(), RuntimeReturnCode::Success);
@@ -3379,20 +2377,9 @@ fn sr25519_verify() {
 		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1_000_000);
 
 		// Instantiate the sr25519_verify contract.
-		let addr = Contracts::bare_instantiate(
-			ALICE,
-			100_000,
-			GAS_LIMIT,
-			None,
-			Code::Upload(wasm),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr = builder::bare_instantiate(Code::Upload(wasm))
+			.value(100_000)
+			.build_and_unwrap_account_id();
 
 		let call_with = |message: &[u8; 11]| {
 			// Alice's signature for "hello world"
@@ -3450,34 +2437,10 @@ fn failed_deposit_charge_should_roll_back_call() {
 			let _ = <Test as Config>::Currency::set_balance(&ALICE, 1_000_000);
 
 			// Instantiate both contracts.
-			let addr_caller = Contracts::bare_instantiate(
-				ALICE,
-				0,
-				GAS_LIMIT,
-				None,
-				Code::Upload(wasm_caller.clone()),
-				vec![],
-				vec![],
-				DebugInfo::Skip,
-				CollectEvents::Skip,
-			)
-			.result
-			.unwrap()
-			.account_id;
-			let addr_callee = Contracts::bare_instantiate(
-				ALICE,
-				0,
-				GAS_LIMIT,
-				None,
-				Code::Upload(wasm_callee.clone()),
-				vec![],
-				vec![],
-				DebugInfo::Skip,
-				CollectEvents::Skip,
-			)
-			.result
-			.unwrap()
-			.account_id;
+			let addr_caller = builder::bare_instantiate(Code::Upload(wasm_caller.clone()))
+				.build_and_unwrap_account_id();
+			let addr_callee = builder::bare_instantiate(Code::Upload(wasm_callee.clone()))
+				.build_and_unwrap_account_id();
 
 			// Give caller proxy access to Alice.
 			assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(ALICE), addr_caller.clone(), (), 0));
@@ -3701,15 +2664,7 @@ fn remove_code_in_use() {
 	ExtBuilder::default().existential_deposit(100).build().execute_with(|| {
 		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1_000_000);
 
-		assert_ok!(Contracts::instantiate_with_code(
-			RuntimeOrigin::signed(ALICE),
-			0,
-			GAS_LIMIT,
-			None,
-			wasm,
-			vec![],
-			vec![],
-		));
+		assert_ok!(builder::instantiate_with_code(wasm).build());
 
 		// Drop previous events
 		initialize_block(2);
@@ -3753,20 +2708,7 @@ fn instantiate_with_zero_balance_works() {
 		initialize_block(2);
 
 		// Instantiate the BOB contract.
-		let addr = Contracts::bare_instantiate(
-			ALICE,
-			0,
-			GAS_LIMIT,
-			None,
-			Code::Upload(wasm),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr = builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_account_id();
 
 		// Ensure the contract was stored and get expected deposit amount to be reserved.
 		let deposit_expected = expected_deposit(ensure_stored(code_hash));
@@ -3850,20 +2792,9 @@ fn instantiate_with_below_existential_deposit_works() {
 		initialize_block(2);
 
 		// Instantiate the BOB contract.
-		let addr = Contracts::bare_instantiate(
-			ALICE,
-			value,
-			GAS_LIMIT,
-			None,
-			Code::Upload(wasm),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr = builder::bare_instantiate(Code::Upload(wasm))
+			.value(value)
+			.build_and_unwrap_account_id();
 
 		// Ensure the contract was stored and get expected deposit amount to be reserved.
 		let deposit_expected = expected_deposit(ensure_stored(code_hash));
 
@@ -3949,20 +2880,7 @@ fn storage_deposit_works() {
 	ExtBuilder::default().existential_deposit(200).build().execute_with(|| {
 		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1_000_000);
 
-		let addr = Contracts::bare_instantiate(
-			ALICE,
-			0,
-			GAS_LIMIT,
-			None,
-			Code::Upload(wasm),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr = builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_account_id();
 
 		let mut deposit = test_utils::contract_info_storage_deposit(&addr);
 
 		// Drop previous events
 		initialize_block(2);
 
 		// Create storage
-		assert_ok!(Contracts::call(
-			RuntimeOrigin::signed(ALICE),
-			addr.clone(),
-			42,
-			GAS_LIMIT,
-			None,
-			(1_000u32, 5_000u32).encode(),
-		));
+		assert_ok!(builder::call(addr.clone())
+			.value(42)
+			.data((1_000u32, 5_000u32).encode())
+			.build());
 		// 4 is for creating 2 storage items
 		let charged0 = 4 + 1_000 + 5_000;
 		deposit += charged0;
 		assert_eq!(get_contract(&addr).total_deposit(), deposit);
-
-		// Add more storage (but also remove some)
-		assert_ok!(Contracts::call(
-			RuntimeOrigin::signed(ALICE),
-			addr.clone(),
-			0,
-			GAS_LIMIT,
-			None,
-			(2_000u32, 4_900u32).encode(),
-		));
+
+		// Add more storage (but also remove some)
+		assert_ok!(builder::call(addr.clone()).data((2_000u32, 4_900u32).encode()).build());
 		let charged1 = 1_000 - 100;
 		deposit += charged1;
 		assert_eq!(get_contract(&addr).total_deposit(), deposit);
 
 		// Remove more storage (but also add some)
-		assert_ok!(Contracts::call(
-			RuntimeOrigin::signed(ALICE),
-			addr.clone(),
-			0,
-			GAS_LIMIT,
-			None,
-			(2_100u32, 900u32).encode(),
-		));
+		assert_ok!(builder::call(addr.clone()).data((2_100u32, 900u32).encode()).build());
 		// -1 for numeric instability
 		let refunded0 = 4_000 - 100 - 1;
 		deposit -= refunded0;
@@ -4093,43 +2993,12 @@ fn storage_deposit_callee_works() {
 		let min_balance = Contracts::min_balance();
 
 		// Create both contracts: Constructors do nothing.
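The arithmetic behind `charged0 = 4 + 1_000 + 5_000` above follows from the mock runtime charging two units per storage item and one unit per byte: the `4` covers the two items created, the rest their bytes. A hedged sketch of that bookkeeping, with the constants inferred from the test comments rather than read from the mock:

// Sketch only: constants inferred from the comments, not imported from the mock.
const DEPOSIT_PER_ITEM: u64 = 2;
const DEPOSIT_PER_BYTE: u64 = 1;

fn charge(new_items: u64, new_bytes: u64) -> u64 {
	DEPOSIT_PER_ITEM * new_items + DEPOSIT_PER_BYTE * new_bytes
}

fn main() {
	// Two items of 1_000 and 5_000 bytes: "4 is for creating 2 storage items".
	assert_eq!(charge(2, 1_000 + 5_000), 4 + 1_000 + 5_000);
}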
-		let addr_caller = Contracts::bare_instantiate(
-			ALICE,
-			0,
-			GAS_LIMIT,
-			None,
-			Code::Upload(wasm_caller),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
-		let addr_callee = Contracts::bare_instantiate(
-			ALICE,
-			0,
-			GAS_LIMIT,
-			None,
-			Code::Upload(wasm_callee),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr_caller =
+			builder::bare_instantiate(Code::Upload(wasm_caller)).build_and_unwrap_account_id();
+		let addr_callee =
+			builder::bare_instantiate(Code::Upload(wasm_callee)).build_and_unwrap_account_id();
 
-		assert_ok!(Contracts::call(
-			RuntimeOrigin::signed(ALICE),
-			addr_caller,
-			0,
-			GAS_LIMIT,
-			None,
-			(100u32, &addr_callee).encode()
-		));
+		assert_ok!(builder::call(addr_caller).data((100u32, &addr_callee).encode()).build());
 
 		let callee = get_contract(&addr_callee);
 		let deposit = DepositPerByte::get() * 100 + DepositPerItem::get() * 1;
@@ -4152,20 +3021,7 @@ fn set_code_extrinsic() {
 	ExtBuilder::default().existential_deposit(100).build().execute_with(|| {
 		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1_000_000);
 
-		let addr = Contracts::bare_instantiate(
-			ALICE,
-			0,
-			GAS_LIMIT,
-			None,
-			Code::Upload(wasm),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr = builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_account_id();
 
 		assert_ok!(Contracts::upload_code(
 			RuntimeOrigin::signed(ALICE),
@@ -4189,7 +3045,7 @@ fn set_code_extrinsic() {
 		assert_eq!(get_contract(&addr).code_hash, code_hash);
 		assert_refcount!(&code_hash, 1);
 		assert_refcount!(&new_code_hash, 0);
-		assert_eq!(System::events(), vec![],);
+		assert_eq!(System::events(), vec![]);
 
 		// contract must exist
 		assert_noop!(
@@ -4199,7 +3055,7 @@ fn set_code_extrinsic() {
 		assert_eq!(get_contract(&addr).code_hash, code_hash);
 		assert_refcount!(&code_hash, 1);
 		assert_refcount!(&new_code_hash, 0);
-		assert_eq!(System::events(), vec![],);
+		assert_eq!(System::events(), vec![]);
 
 		// new code hash must exist
 		assert_noop!(
@@ -4209,7 +3065,7 @@ fn set_code_extrinsic() {
 		assert_eq!(get_contract(&addr).code_hash, code_hash);
 		assert_refcount!(&code_hash, 1);
 		assert_refcount!(&new_code_hash, 0);
-		assert_eq!(System::events(), vec![],);
+		assert_eq!(System::events(), vec![]);
 
 		// successful call
 		assert_ok!(Contracts::set_code(RuntimeOrigin::root(), addr.clone(), new_code_hash));
@@ -4239,20 +3095,9 @@ fn slash_cannot_kill_account() {
 		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1_000_000);
 		let min_balance = Contracts::min_balance();
 
-		let addr = Contracts::bare_instantiate(
-			ALICE,
-			value,
-			GAS_LIMIT,
-			None,
-			Code::Upload(wasm),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr = builder::bare_instantiate(Code::Upload(wasm))
+			.value(value)
+			.build_and_unwrap_account_id();
 
 		// Drop previous events
 		initialize_block(2);
@@ -4303,29 +3148,13 @@ fn contract_reverted() {
 
 		// Calling extrinsic: revert leads to an error
 		assert_err_ignore_postinfo!(
-			Contracts::instantiate(
-				RuntimeOrigin::signed(ALICE),
-				0,
-				GAS_LIMIT,
-				None,
-				code_hash,
-				input.clone(),
-				vec![],
-			),
+			builder::instantiate(code_hash).data(input.clone()).build(),
			<Error<Test>>::ContractReverted,
 		);
 
 		// Calling extrinsic: revert leads to an error
 		assert_err_ignore_postinfo!(
-			Contracts::instantiate_with_code(
-				RuntimeOrigin::signed(ALICE),
-				0,
-				GAS_LIMIT,
-				None,
-				wasm,
-				input.clone(),
-				vec![],
-			),
+
+			builder::instantiate_with_code(wasm).data(input.clone()).build(),
			<Error<Test>>::ContractReverted,
 		);
 
		// This is just a different way of transporting the error that allows reading out
 		// the `data` which is only there on success. Obviously, the contract isn't
 		// instantiated.
-		let result = Contracts::bare_instantiate(
-			ALICE,
-			0,
-			GAS_LIMIT,
-			None,
-			Code::Existing(code_hash),
-			input.clone(),
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap();
+		let result = builder::bare_instantiate(Code::Existing(code_hash))
+			.data(input.clone())
+			.build_and_unwrap_result();
 		assert_eq!(result.result.flags, flags);
 		assert_eq!(result.result.data, buffer);
		assert!(!<ContractInfoOf<Test>>::contains_key(result.account_id));
 
 		// Pass empty flags and therefore successfully instantiate the contract for later use.
-		let addr = Contracts::bare_instantiate(
-			ALICE,
-			0,
-			GAS_LIMIT,
-			None,
-			Code::Existing(code_hash),
-			ReturnFlags::empty().bits().encode(),
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr = builder::bare_instantiate(Code::Existing(code_hash))
+			.data(ReturnFlags::empty().bits().encode())
+			.build_and_unwrap_account_id();
 
 		// Calling extrinsic: revert leads to an error
 		assert_err_ignore_postinfo!(
-			Contracts::call(
-				RuntimeOrigin::signed(ALICE),
-				addr.clone(),
-				0,
-				GAS_LIMIT,
-				None,
-				input.clone()
-			),
+			builder::call(addr.clone()).data(input.clone()).build(),
			<Error<Test>>::ContractReverted,
 		);
 
 		// Calling directly: revert leads to success but the flags indicate the error
-		let result = Contracts::bare_call(
-			ALICE,
-			addr.clone(),
-			0,
-			GAS_LIMIT,
-			None,
-			input,
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-			Determinism::Enforced,
-		)
-		.result
-		.unwrap();
+		let result = builder::bare_call(addr.clone()).data(input).build_and_unwrap_result();
 		assert_eq!(result.flags, flags);
 		assert_eq!(result.data, buffer);
 	});
}
@@ -4407,20 +3196,9 @@ fn set_code_hash() {
 		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1_000_000);
 
 		// Instantiate the 'caller'
-		let contract_addr = Contracts::bare_instantiate(
-			ALICE,
-			300_000,
-			GAS_LIMIT,
-			None,
-			Code::Upload(wasm),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let contract_addr = builder::bare_instantiate(Code::Upload(wasm))
+			.value(300_000)
+			.build_and_unwrap_account_id();
 		// upload new code
 		assert_ok!(Contracts::upload_code(
 			RuntimeOrigin::signed(ALICE),
@@ -4432,35 +3210,16 @@ fn set_code_hash() {
 
 		System::reset_events();
 
 		// First call sets new code_hash and returns 1
-		let result = Contracts::bare_call(
-			ALICE,
-			contract_addr.clone(),
-			0,
-			GAS_LIMIT,
-			None,
-			new_code_hash.as_ref().to_vec(),
-			DebugInfo::UnsafeDebug,
-			CollectEvents::Skip,
-			Determinism::Enforced,
-		)
-		.result
-		.unwrap();
+		let result = builder::bare_call(contract_addr.clone())
+			.data(new_code_hash.as_ref().to_vec())
+			.debug(DebugInfo::UnsafeDebug)
+			.build_and_unwrap_result();
 		assert_return_code!(result, 1);
 
 		// Second call runs the new contract code that returns 2
-		let result = Contracts::bare_call(
-			ALICE,
-			contract_addr.clone(),
-			0,
-			GAS_LIMIT,
-			None,
-			vec![],
-			DebugInfo::UnsafeDebug,
-			CollectEvents::Skip,
-			Determinism::Enforced,
-		)
-		.result
-		.unwrap();
+		let result = builder::bare_call(contract_addr.clone())
+			.debug(DebugInfo::UnsafeDebug)
+			.build_and_unwrap_result();
 		assert_return_code!(result, 2);
 
 		// Checking for the last event only
@@ -4512,37 +3271,16 @@ fn storage_deposit_limit_is_enforced() {
 
 		// Setting insufficient storage_deposit should fail.
 		assert_err!(
-			Contracts::bare_instantiate(
-				ALICE,
-				0,
-				GAS_LIMIT,
-				Some((2 * min_balance + 3 - 1).into()), /* expected deposit is 2 * ed + 3 for
-				                                         * the call */
-				Code::Upload(wasm.clone()),
-				vec![],
-				vec![],
-				DebugInfo::Skip,
-				CollectEvents::Skip,
-			)
-			.result,
+			builder::bare_instantiate(Code::Upload(wasm.clone()))
+				// expected deposit is 2 * ed + 3 for the call
+				.storage_deposit_limit(Some((2 * min_balance + 3 - 1).into()))
+				.build()
+				.result,
			<Error<Test>>::StorageDepositLimitExhausted,
 		);
 
 		// Instantiate the BOB contract.
-		let addr = Contracts::bare_instantiate(
-			ALICE,
-			0,
-			GAS_LIMIT,
-			None,
-			Code::Upload(wasm),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr = builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_account_id();
 
 		let info_deposit = test_utils::contract_info_storage_deposit(&addr);
 		// Check that the BOB contract has been instantiated and has the minimum balance
 
 		// setting insufficient deposit limit, as it requires 3 Balance:
 		// 2 for the item added + 1 for the new storage item.
 		assert_err_ignore_postinfo!(
-			Contracts::call(
-				RuntimeOrigin::signed(ALICE),
-				addr.clone(),
-				0,
-				GAS_LIMIT,
-				Some(codec::Compact(2)),
-				1u32.to_le_bytes().to_vec()
-			),
+			builder::call(addr.clone())
+				.storage_deposit_limit(Some(codec::Compact(2)))
+				.data(1u32.to_le_bytes().to_vec())
+				.build(),
			<Error<Test>>::StorageDepositLimitExhausted,
 		);
 
 		// Create 1 byte of storage, should cost 3 Balance:
 		// 2 for the item added + 1 for the new storage item.
 		// Should pass as it falls back to DefaultDepositLimit.
-		assert_ok!(Contracts::call(
-			RuntimeOrigin::signed(ALICE),
-			addr.clone(),
-			0,
-			GAS_LIMIT,
-			None,
-			1u32.to_le_bytes().to_vec()
-		));
+		assert_ok!(builder::call(addr.clone()).data(1u32.to_le_bytes().to_vec()).build());
 
 		// Use 4 more bytes of the storage for the same item, which requires 4 Balance.
 		// Should fail as DefaultDepositLimit is 3 and hence isn't enough.
 		assert_err_ignore_postinfo!(
-			Contracts::call(
-				RuntimeOrigin::signed(ALICE),
-				addr.clone(),
-				0,
-				GAS_LIMIT,
-				None,
-				5u32.to_le_bytes().to_vec()
-			),
+			builder::call(addr.clone()).data(5u32.to_le_bytes().to_vec()).build(),
			<Error<Test>>::StorageDepositLimitExhausted,
 		);
 	});
}
@@ -4605,45 +3325,17 @@ fn deposit_limit_in_nested_calls() {
 		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1_000_000);
 
 		// Create both contracts: Constructors do nothing.
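The failures asserted in the nested-call hunks that follow all reduce to one comparison: the deposit charges accumulated across the parent call and its sub-calls must stay within the caller-supplied `storage_deposit_limit`. A small sketch of that check, using the numbers from the comments in the next test (a 12-byte item costs 12 + 2 = 14, so a limit of 13 is exhausted); the helper name here is invented, and the real metering lives in the pallet's storage meter:

// Sketch only: the helper is hypothetical; the pallet meters charges per frame.
fn within_limit(limit: u64, charges: &[u64]) -> bool {
	charges.iter().sum::<u64>() <= limit
}

fn main() {
	let item_cost = 12 + 2; // 12 bytes plus 2 for the storage item, per the comments
	assert!(!within_limit(13, &[item_cost])); // 13 < 14: StorageDepositLimitExhausted
	assert!(within_limit(14, &[item_cost])); // exactly enough
}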
-		let addr_caller = Contracts::bare_instantiate(
-			ALICE,
-			0,
-			GAS_LIMIT,
-			None,
-			Code::Upload(wasm_caller),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
-		let addr_callee = Contracts::bare_instantiate(
-			ALICE,
-			0,
-			GAS_LIMIT,
-			None,
-			Code::Upload(wasm_callee),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr_caller =
+			builder::bare_instantiate(Code::Upload(wasm_caller)).build_and_unwrap_account_id();
+		let addr_callee =
+			builder::bare_instantiate(Code::Upload(wasm_callee)).build_and_unwrap_account_id();
 
 		// Create 100 bytes of storage with a price of 1 per byte
 		// This is 100 Balance + 2 Balance for the item
-		assert_ok!(Contracts::call(
-			RuntimeOrigin::signed(ALICE),
-			addr_callee.clone(),
-			0,
-			GAS_LIMIT,
-			Some(codec::Compact(102)),
-			100u32.to_le_bytes().to_vec()
-		));
+		assert_ok!(builder::call(addr_callee.clone())
			.storage_deposit_limit(Some(codec::Compact(102)))
			.data(100u32.to_le_bytes().to_vec())
			.build());
 
 		// We do not remove any storage but add a storage item of 12 bytes in the caller
 		// contract. This would cost 12 + 2 = 14 Balance.
 		// This should fail as the specified parent's limit is less than the cost: 13 <
 		// 14.
 		assert_err_ignore_postinfo!(
-			Contracts::call(
-				RuntimeOrigin::signed(ALICE),
-				addr_caller.clone(),
-				0,
-				GAS_LIMIT,
-				Some(codec::Compact(13)),
-				(100u32, &addr_callee, 0u64).encode(),
-			),
+			builder::call(addr_caller.clone())
				.storage_deposit_limit(Some(codec::Compact(13)))
				.data((100u32, &addr_callee, 0u64).encode())
				.build(),
			<Error<Test>>::StorageDepositLimitExhausted,
 		);
 		// Now we specify the parent's limit high enough to cover the caller's storage additions.
 
 		// This should fail as the specified parent's limit is less than the cost: 14
 		// < 15.
 		assert_err_ignore_postinfo!(
-			Contracts::call(
-				RuntimeOrigin::signed(ALICE),
-				addr_caller.clone(),
-				0,
-				GAS_LIMIT,
-				Some(codec::Compact(14)),
-				(101u32, &addr_callee, 0u64).encode(),
-			),
+			builder::call(addr_caller.clone())
				.storage_deposit_limit(Some(codec::Compact(14)))
				.data((101u32, &addr_callee, 0u64).encode())
				.build(),
			<Error<Test>>::StorageDepositLimitExhausted,
 		);
 
 		// The parent's limit is enough for the caller's charges, but the nested sub-call
 		// should have a deposit limit of at least 2 Balance. The sub-call should be rolled
 		// back, which is covered by the next test case.
 		assert_err_ignore_postinfo!(
-			Contracts::call(
-				RuntimeOrigin::signed(ALICE),
-				addr_caller.clone(),
-				0,
-				GAS_LIMIT,
-				Some(codec::Compact(16)),
-				(102u32, &addr_callee, 1u64).encode(),
-			),
+			builder::call(addr_caller.clone())
				.storage_deposit_limit(Some(codec::Compact(16)))
				.data((102u32, &addr_callee, 1u64).encode())
				.build(),
			<Error<Test>>::StorageDepositLimitExhausted,
 		);
 
 		// Refund in the callee contract but not enough to cover the 14 Balance required by the
 		// caller. Note that if the previous sub-call hadn't rolled back, this call would pass,
 		// making the test case fail. We don't set a special limit for the nested call here.
assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr_caller.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(0)), - (87u32, &addr_callee, 0u64).encode(), - ), + builder::call(addr_caller.clone()) + .storage_deposit_limit(Some(codec::Compact(0))) + .data((87u32, &addr_callee, 0u64).encode()) + .build(), >::StorageDepositLimitExhausted, ); @@ -4716,28 +3392,19 @@ fn deposit_limit_in_nested_calls() { // Require more than the sender's balance. // We don't set a special limit for the nested call. assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr_caller.clone(), - 0, - GAS_LIMIT, - None, - (1200u32, &addr_callee, 1u64).encode(), - ), + builder::call(addr_caller.clone()) + .data((1200u32, &addr_callee, 1u64).encode()) + .build(), >::StorageDepositLimitExhausted, ); // Same as above but allow for the additional deposit of 1 Balance in parent. // We set the special deposit limit of 1 Balance for the nested call, which isn't // enforced as callee frees up storage. This should pass. - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr_caller.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(1)), - (87u32, &addr_callee, 1u64).encode(), - )); + assert_ok!(builder::call(addr_caller.clone()) + .storage_deposit_limit(Some(codec::Compact(1))) + .data((87u32, &addr_callee, 1u64).encode()) + .build()); }); } @@ -4751,35 +3418,13 @@ fn deposit_limit_in_nested_instantiate() { let _ = ::Currency::set_balance(&ALICE, 1_000_000); let _ = ::Currency::set_balance(&BOB, 1_000_000); // Create caller contract - let addr_caller = Contracts::bare_instantiate( - ALICE, - 10_000u64, // this balance is later passed to the deployed contract - GAS_LIMIT, - None, - Code::Upload(wasm_caller), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr_caller = builder::bare_instantiate(Code::Upload(wasm_caller)) + .value(10_000u64) // this balance is later passed to the deployed contract + .build_and_unwrap_account_id(); // Deploy a contract to get its occupied storage size - let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm_callee), - vec![0, 0, 0, 0], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm_callee)) + .data(vec![0, 0, 0, 0]) + .build_and_unwrap_account_id(); let callee_info_len = ContractInfoOf::::get(&addr).unwrap().encoded_size() as u64; @@ -4795,14 +3440,11 @@ fn deposit_limit_in_nested_instantiate() { // Provided the limit is set to be 1 Balance less, // this call should fail on the return from the caller contract. assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(BOB), - addr_caller.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(callee_info_len + 2 + ED + 1)), - (0u32, &code_hash_callee, 0u64).encode(), - ), + builder::call(addr_caller.clone()) + .origin(RuntimeOrigin::signed(BOB)) + .storage_deposit_limit(Some(codec::Compact(callee_info_len + 2 + ED + 1))) + .data((0u32, &code_hash_callee, 0u64).encode()) + .build(), >::StorageDepositLimitExhausted, ); // The charges made on instantiation should be rolled back. @@ -4812,14 +3454,11 @@ fn deposit_limit_in_nested_instantiate() { // byte in the constructor. Hence +1 Balance to the limit is needed. This should fail on the // return from constructor. 
assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(BOB), - addr_caller.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(callee_info_len + 2 + ED + 2)), - (1u32, &code_hash_callee, 0u64).encode(), - ), + builder::call(addr_caller.clone()) + .origin(RuntimeOrigin::signed(BOB)) + .storage_deposit_limit(Some(codec::Compact(callee_info_len + 2 + ED + 2))) + .data((1u32, &code_hash_callee, 0u64).encode()) + .build(), >::StorageDepositLimitExhausted, ); // The charges made on the instantiation should be rolled back. @@ -4829,14 +3468,11 @@ fn deposit_limit_in_nested_instantiate() { // This should fail during the charging for the instantiation in // `RawMeter::charge_instantiate()` assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(BOB), - addr_caller.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(callee_info_len + 2 + ED + 2)), - (0u32, &code_hash_callee, callee_info_len + 2 + ED + 1).encode(), - ), + builder::call(addr_caller.clone()) + .origin(RuntimeOrigin::signed(BOB)) + .storage_deposit_limit(Some(codec::Compact(callee_info_len + 2 + ED + 2))) + .data((0u32, &code_hash_callee, callee_info_len + 2 + ED + 1).encode()) + .build(), >::StorageDepositLimitExhausted, ); // The charges made on the instantiation should be rolled back. @@ -4847,31 +3483,22 @@ fn deposit_limit_in_nested_instantiate() { // Now we set enough limit for the parent call, but insufficient limit for child // instantiate. This should fail right after the constructor execution. assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(BOB), - addr_caller.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(callee_info_len + 2 + ED + 3)), // enough parent limit - (1u32, &code_hash_callee, callee_info_len + 2 + ED + 2).encode(), - ), + builder::call(addr_caller.clone()) + .origin(RuntimeOrigin::signed(BOB)) + .storage_deposit_limit(Some(codec::Compact(callee_info_len + 2 + ED + 3))) // enough parent limit + .data((1u32, &code_hash_callee, callee_info_len + 2 + ED + 2).encode()) + .build(), >::StorageDepositLimitExhausted, ); // The charges made on the instantiation should be rolled back. assert_eq!(::Currency::free_balance(&BOB), 1_000_000); // Set enough deposit limit for the child instantiate. This should succeed. - let result = Contracts::bare_call( - BOB, - addr_caller.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(callee_info_len + 2 + ED + 4).into()), - (1u32, &code_hash_callee, callee_info_len + 2 + ED + 3).encode(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ); + let result = builder::bare_call(addr_caller.clone()) + .origin(BOB) + .storage_deposit_limit(Some(codec::Compact(callee_info_len + 2 + ED + 4).into())) + .data((1u32, &code_hash_callee, callee_info_len + 2 + ED + 3).encode()) + .build(); let returned = result.result.unwrap(); // All balance of the caller except ED has been transferred to the callee. @@ -4891,7 +3518,7 @@ fn deposit_limit_in_nested_instantiate() { 1_000_000 - (callee_info_len + 2 + ED + 3) ); // Check that deposit due to be charged still includes these 3 Balance - assert_eq!(result.storage_deposit.charge_or_zero(), (callee_info_len + 2 + ED + 3),) + assert_eq!(result.storage_deposit.charge_or_zero(), (callee_info_len + 2 + ED + 3)) }); } @@ -4905,20 +3532,7 @@ fn deposit_limit_honors_liquidity_restrictions() { let min_balance = Contracts::min_balance(); // Instantiate the BOB contract. 
- let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_account_id(); let info_deposit = test_utils::contract_info_storage_deposit(&addr); // Check that the contract has been instantiated and has the minimum balance @@ -4933,14 +3547,11 @@ fn deposit_limit_honors_liquidity_restrictions() { ) .unwrap(); assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(BOB), - addr.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(200)), - 100u32.to_le_bytes().to_vec() - ), + builder::call(addr.clone()) + .origin(RuntimeOrigin::signed(BOB)) + .storage_deposit_limit(Some(codec::Compact(200))) + .data(100u32.to_le_bytes().to_vec()) + .build(), >::StorageDepositNotEnoughFunds, ); assert_eq!(::Currency::free_balance(&BOB), min_balance); @@ -4956,20 +3567,7 @@ fn deposit_limit_honors_existential_deposit() { let min_balance = Contracts::min_balance(); // Instantiate the BOB contract. - let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_account_id(); let info_deposit = test_utils::contract_info_storage_deposit(&addr); @@ -4979,14 +3577,11 @@ fn deposit_limit_honors_existential_deposit() { // check that the deposit can't bring the account below the existential deposit assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(BOB), - addr.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(900)), - 100u32.to_le_bytes().to_vec() - ), + builder::call(addr.clone()) + .origin(RuntimeOrigin::signed(BOB)) + .storage_deposit_limit(Some(codec::Compact(900))) + .data(100u32.to_le_bytes().to_vec()) + .build(), >::StorageDepositNotEnoughFunds, ); assert_eq!(::Currency::free_balance(&BOB), 1_000); @@ -5002,20 +3597,7 @@ fn deposit_limit_honors_min_leftover() { let min_balance = Contracts::min_balance(); // Instantiate the BOB contract. 
- let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_account_id(); let info_deposit = test_utils::contract_info_storage_deposit(&addr); @@ -5026,14 +3608,12 @@ fn deposit_limit_honors_min_leftover() { // check that the minimum leftover (value send) is considered assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(BOB), - addr.clone(), - 400, - GAS_LIMIT, - Some(codec::Compact(500)), - 100u32.to_le_bytes().to_vec() - ), + builder::call(addr.clone()) + .origin(RuntimeOrigin::signed(BOB)) + .value(400) + .storage_deposit_limit(Some(codec::Compact(500))) + .data(100u32.to_le_bytes().to_vec()) + .build(), >::StorageDepositNotEnoughFunds, ); assert_eq!(::Currency::free_balance(&BOB), 1_000); @@ -5069,30 +3649,11 @@ fn cannot_instantiate_indeterministic_code() { // Try to instantiate directly from code assert_err_ignore_postinfo!( - Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), - 0, - GAS_LIMIT, - None, - wasm.clone(), - vec![], - vec![], - ), + builder::instantiate_with_code(wasm.clone()).build(), >::CodeRejected, ); assert_err!( - Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm.clone()), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result, + builder::bare_instantiate(Code::Upload(wasm.clone())).build().result, >::CodeRejected, ); @@ -5116,48 +3677,17 @@ fn cannot_instantiate_indeterministic_code() { )); assert_err_ignore_postinfo!( - Contracts::instantiate( - RuntimeOrigin::signed(ALICE), - 0, - GAS_LIMIT, - None, - code_hash, - vec![], - vec![], - ), + builder::instantiate(code_hash).build(), >::Indeterministic, ); assert_err!( - Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Existing(code_hash), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result, + builder::bare_instantiate(Code::Existing(code_hash)).build().result, >::Indeterministic, ); // Deploy contract which instantiates another contract - let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(caller_wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = + builder::bare_instantiate(Code::Upload(caller_wasm)).build_and_unwrap_account_id(); // Try to instantiate `code_hash` from another contract in deterministic mode assert_err!( @@ -5211,20 +3741,8 @@ fn cannot_set_code_indeterministic_code() { )); // Create the contract that will call `seal_set_code_hash` - let caller_addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(caller_wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let caller_addr = + builder::bare_instantiate(Code::Upload(caller_wasm)).build_and_unwrap_account_id(); // We do not allow to set the code hash to a non-deterministic wasm assert_err!( @@ -5261,20 +3779,8 @@ fn delegate_call_indeterministic_code() { )); // Create the contract that will call `seal_delegate_call` - let caller_addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(caller_wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let caller_addr = + 
builder::bare_instantiate(Code::Upload(caller_wasm)).build_and_unwrap_account_id(); // The delegate call will fail in deterministic mode assert_err!( @@ -5331,17 +3837,9 @@ fn locking_delegate_dependency_works() { // Instantiate the caller contract with the given input. let instantiate = |input: &(u32, H256)| { - Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm_caller.clone()), - input.encode(), - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) + builder::bare_instantiate(Code::Upload(wasm_caller.clone())) + .data(input.encode()) + .build() }; // Call contract with the given input. @@ -5509,17 +4007,7 @@ fn native_dependency_deposit_works() { }; // Instantiate the set_code_hash contract. - let res = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - code, - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ); + let res = builder::bare_instantiate(code).build(); let addr = res.result.unwrap().account_id; let base_deposit = ED + test_utils::contract_info_storage_deposit(&addr); @@ -5567,37 +4055,17 @@ fn reentrance_count_works_with_call() { ExtBuilder::default().existential_deposit(100).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let contract_addr = Contracts::bare_instantiate( - ALICE, - 300_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let contract_addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(300_000) + .build_and_unwrap_account_id(); // passing reentrant count to the input let input = 0.encode(); - Contracts::bare_call( - ALICE, - contract_addr, - 0, - GAS_LIMIT, - None, - input, - DebugInfo::UnsafeDebug, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + builder::bare_call(contract_addr) + .data(input) + .debug(DebugInfo::UnsafeDebug) + .build_and_unwrap_result(); }); } @@ -5608,37 +4076,17 @@ fn reentrance_count_works_with_delegated_call() { ExtBuilder::default().existential_deposit(100).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let contract_addr = Contracts::bare_instantiate( - ALICE, - 300_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let contract_addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(300_000) + .build_and_unwrap_account_id(); // adding a callstack height to the input let input = (code_hash, 1).encode(); - Contracts::bare_call( - ALICE, - contract_addr.clone(), - 0, - GAS_LIMIT, - None, - input, - DebugInfo::UnsafeDebug, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + builder::bare_call(contract_addr.clone()) + .data(input) + .debug(DebugInfo::UnsafeDebug) + .build_and_unwrap_result(); }); } @@ -5651,63 +4099,23 @@ fn account_reentrance_count_works() { ExtBuilder::default().existential_deposit(100).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let contract_addr = Contracts::bare_instantiate( - ALICE, - 300_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let contract_addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(300_000) + .build_and_unwrap_account_id(); - let another_contract_addr = Contracts::bare_instantiate( - ALICE, - 300_000, - GAS_LIMIT, - None, - Code::Upload(wasm_reentrance_count), 
- vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let another_contract_addr = builder::bare_instantiate(Code::Upload(wasm_reentrance_count)) + .value(300_000) + .build_and_unwrap_account_id(); - let result1 = Contracts::bare_call( - ALICE, - contract_addr.clone(), - 0, - GAS_LIMIT, - None, - contract_addr.encode(), - DebugInfo::UnsafeDebug, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result1 = builder::bare_call(contract_addr.clone()) + .data(contract_addr.encode()) + .debug(DebugInfo::UnsafeDebug) + .build_and_unwrap_result(); - let result2 = Contracts::bare_call( - ALICE, - contract_addr.clone(), - 0, - GAS_LIMIT, - None, - another_contract_addr.encode(), - DebugInfo::UnsafeDebug, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result2 = builder::bare_call(contract_addr.clone()) + .data(another_contract_addr.encode()) + .debug(DebugInfo::UnsafeDebug) + .build_and_unwrap_result(); assert_eq!(result1.data, 1.encode()); assert_eq!(result2.data, 0.encode()); @@ -5754,7 +4162,7 @@ fn signed_cannot_set_code() { fn none_cannot_call_code() { ExtBuilder::default().build().execute_with(|| { assert_noop!( - Contracts::call(RuntimeOrigin::none(), BOB, 0, GAS_LIMIT, None, Vec::new()), + builder::call(BOB).origin(RuntimeOrigin::none()).build(), DispatchError::BadOrigin, ); }); @@ -5767,30 +4175,10 @@ fn root_can_call() { ExtBuilder::default().existential_deposit(100).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_account_id(); // Call the contract. 
- assert_ok!(Contracts::call( - RuntimeOrigin::root(), - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![] - )); + assert_ok!(builder::call(addr.clone()).origin(RuntimeOrigin::root()).build()); }); } @@ -5800,15 +4188,7 @@ fn root_cannot_instantiate_with_code() { ExtBuilder::default().build().execute_with(|| { assert_err_ignore_postinfo!( - Contracts::instantiate_with_code( - RuntimeOrigin::root(), - 0, - GAS_LIMIT, - None, - wasm, - vec![], - vec![], - ), + builder::instantiate_with_code(wasm).origin(RuntimeOrigin::root()).build(), DispatchError::BadOrigin ); }); @@ -5820,15 +4200,7 @@ fn root_cannot_instantiate() { ExtBuilder::default().build().execute_with(|| { assert_err_ignore_postinfo!( - Contracts::instantiate( - RuntimeOrigin::root(), - 0, - GAS_LIMIT, - None, - code_hash, - vec![], - vec![], - ), + builder::instantiate(code_hash).origin(RuntimeOrigin::root()).build(), DispatchError::BadOrigin ); }); @@ -5881,53 +4253,25 @@ fn only_instantiation_origin_can_instantiate() { let _ = Balances::set_balance(&BOB, 1_000_000); assert_err_ignore_postinfo!( - Contracts::instantiate_with_code( - RuntimeOrigin::root(), - 0, - GAS_LIMIT, - None, - code.clone(), - vec![], - vec![], - ), + builder::instantiate_with_code(code.clone()) + .origin(RuntimeOrigin::root()) + .build(), DispatchError::BadOrigin ); assert_err_ignore_postinfo!( - Contracts::instantiate_with_code( - RuntimeOrigin::signed(BOB), - 0, - GAS_LIMIT, - None, - code.clone(), - vec![], - vec![], - ), + builder::instantiate_with_code(code.clone()) + .origin(RuntimeOrigin::signed(BOB)) + .build(), DispatchError::BadOrigin ); // Only Alice can instantiate - assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), - 0, - GAS_LIMIT, - None, - code, - vec![], - vec![], - ),); + assert_ok!(builder::instantiate_with_code(code).build()); // Bob cannot instantiate with either `instantiate_with_code` or `instantiate`. assert_err_ignore_postinfo!( - Contracts::instantiate( - RuntimeOrigin::signed(BOB), - 0, - GAS_LIMIT, - None, - code_hash, - vec![], - vec![], - ), + builder::instantiate(code_hash).origin(RuntimeOrigin::signed(BOB)).build(), DispatchError::BadOrigin ); }); @@ -5940,44 +4284,18 @@ fn balance_api_returns_free_balance() { let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Instantiate the BOB contract without any extra balance. - let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm.to_vec()), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = + builder::bare_instantiate(Code::Upload(wasm.to_vec())).build_and_unwrap_account_id(); let value = 0; // Call BOB which makes it call the balance runtime API. // The contract code asserts that the returned balance is 0. - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - value, - GAS_LIMIT, - None, - vec![] - )); + assert_ok!(builder::call(addr.clone()).value(value).build()); let value = 1; // Calling with value will trap the contract. 
assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - value, - GAS_LIMIT, - None, - vec![] - ), + builder::call(addr.clone()).value(value).build(), >::ContractTrapped ); }); @@ -5989,37 +4307,14 @@ fn gas_consumed_is_linear_for_nested_calls() { ExtBuilder::default().existential_deposit(200).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(code), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_account_id(); let max_call_depth = ::CallStack::size() as u32; let [gas_0, gas_1, gas_2, gas_max] = { [0u32, 1u32, 2u32, max_call_depth] .iter() .map(|i| { - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - i.encode(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ); + let result = builder::bare_call(addr.clone()).data(i.encode()).build(); assert_ok!(result.result); result.gas_consumed }) diff --git a/substrate/frame/contracts/src/tests/builder.rs b/substrate/frame/contracts/src/tests/builder.rs new file mode 100644 index 00000000000..08d12503a29 --- /dev/null +++ b/substrate/frame/contracts/src/tests/builder.rs @@ -0,0 +1,219 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::{AccountId32, Test, ALICE, GAS_LIMIT}; +use crate::{ + tests::RuntimeOrigin, AccountIdLookupOf, AccountIdOf, BalanceOf, Code, CodeHash, CollectEvents, + ContractExecResult, ContractInstantiateResult, DebugInfo, Determinism, EventRecordOf, + ExecReturnValue, OriginFor, Pallet, Weight, +}; +use codec::Compact; +use frame_support::pallet_prelude::DispatchResultWithPostInfo; +use paste::paste; + +/// Helper macro to generate a builder for contract API calls. +macro_rules! builder { + // Entry point to generate a builder for the given method. + ( + $method:ident($($field:ident: $type:ty,)*) -> $result:ty + ) => { + paste!{ + builder!([< $method:camel Builder >], $method($($field: $type,)* ) -> $result); + } + }; + // Generate the builder struct and its methods. 
+ ( + $name:ident, + $method:ident( + $($field:ident: $type:ty,)* + ) -> $result:ty + ) => { + #[doc = concat!("A builder to construct a ", stringify!($method), " call")] + pub struct $name { + $($field: $type,)* + } + + #[allow(dead_code)] + impl $name + { + $( + #[doc = concat!("Set the ", stringify!($field))] + pub fn $field(mut self, value: $type) -> Self { + self.$field = value; + self + } + )* + + #[doc = concat!("Build the ", stringify!($method), " call")] + pub fn build(self) -> $result { + Pallet::::$method( + $(self.$field,)* + ) + } + } + } +} + +builder!( + instantiate_with_code( + origin: OriginFor, + value: BalanceOf, + gas_limit: Weight, + storage_deposit_limit: Option>>, + code: Vec, + data: Vec, + salt: Vec, + ) -> DispatchResultWithPostInfo +); + +builder!( + instantiate( + origin: OriginFor, + value: BalanceOf, + gas_limit: Weight, + storage_deposit_limit: Option>>, + code_hash: CodeHash, + data: Vec, + salt: Vec, + ) -> DispatchResultWithPostInfo +); + +builder!( + bare_instantiate( + origin: AccountIdOf, + value: BalanceOf, + gas_limit: Weight, + storage_deposit_limit: Option>, + code: Code>, + data: Vec, + salt: Vec, + debug: DebugInfo, + collect_events: CollectEvents, + ) -> ContractInstantiateResult, BalanceOf, EventRecordOf> +); + +builder!( + call( + origin: OriginFor, + dest: AccountIdLookupOf, + value: BalanceOf, + gas_limit: Weight, + storage_deposit_limit: Option>>, + data: Vec, + ) -> DispatchResultWithPostInfo +); + +builder!( + bare_call( + origin: AccountIdOf, + dest: AccountIdOf, + value: BalanceOf, + gas_limit: Weight, + storage_deposit_limit: Option>, + data: Vec, + debug: DebugInfo, + collect_events: CollectEvents, + determinism: Determinism, + ) -> ContractExecResult, EventRecordOf> +); + +/// Create a [`BareInstantiateBuilder`] with default values. +pub fn bare_instantiate(code: Code>) -> BareInstantiateBuilder { + BareInstantiateBuilder { + origin: ALICE, + value: 0, + gas_limit: GAS_LIMIT, + storage_deposit_limit: None, + code, + data: vec![], + salt: vec![], + debug: DebugInfo::Skip, + collect_events: CollectEvents::Skip, + } +} + +impl BareInstantiateBuilder { + /// Build the instantiate call and unwrap the result. + pub fn build_and_unwrap_result(self) -> crate::InstantiateReturnValue> { + self.build().result.unwrap() + } + + /// Build the instantiate call and unwrap the account id. + pub fn build_and_unwrap_account_id(self) -> AccountIdOf { + self.build().result.unwrap().account_id + } +} + +/// Create a [`BareCallBuilder`] with default values. +pub fn bare_call(dest: AccountId32) -> BareCallBuilder { + BareCallBuilder { + origin: ALICE, + dest, + value: 0, + gas_limit: GAS_LIMIT, + storage_deposit_limit: None, + data: vec![], + debug: DebugInfo::Skip, + collect_events: CollectEvents::Skip, + determinism: Determinism::Enforced, + } +} + +impl BareCallBuilder { + /// Build the call and unwrap the result. + pub fn build_and_unwrap_result(self) -> ExecReturnValue { + self.build().result.unwrap() + } +} + +/// Create an [`InstantiateWithCodeBuilder`] with default values. +pub fn instantiate_with_code(code: Vec) -> InstantiateWithCodeBuilder { + InstantiateWithCodeBuilder { + origin: RuntimeOrigin::signed(ALICE), + value: 0, + gas_limit: GAS_LIMIT, + storage_deposit_limit: None, + code, + data: vec![], + salt: vec![], + } +} + +/// Create an [`InstantiateBuilder`] with default values. 
+pub fn instantiate(code_hash: CodeHash) -> InstantiateBuilder { + InstantiateBuilder { + origin: RuntimeOrigin::signed(ALICE), + value: 0, + gas_limit: GAS_LIMIT, + storage_deposit_limit: None, + code_hash, + data: vec![], + salt: vec![], + } +} + +/// Create a [`CallBuilder`] with default values. +pub fn call(dest: AccountIdLookupOf) -> CallBuilder { + CallBuilder { + origin: RuntimeOrigin::signed(ALICE), + dest, + value: 0, + gas_limit: GAS_LIMIT, + storage_deposit_limit: None, + data: vec![], + } +} -- GitLab From 987f1c24b679a310b77445b95375027fc8236768 Mon Sep 17 00:00:00 2001 From: Rodrigo Quelhas <22591718+RomarQ@users.noreply.github.com> Date: Thu, 28 Mar 2024 12:50:06 +0000 Subject: [PATCH 053/128] Update benchmarking README.md (#3862) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix reference links Co-authored-by: Bastian Köcher --- substrate/frame/benchmarking/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/substrate/frame/benchmarking/README.md b/substrate/frame/benchmarking/README.md index bf0bde2c3df..0b3680e1154 100644 --- a/substrate/frame/benchmarking/README.md +++ b/substrate/frame/benchmarking/README.md @@ -177,8 +177,8 @@ requirements. The benchmarking CLI uses a Handlebars template to format the final output file. You can optionally pass the flag `--template` pointing to a custom template that can be used instead. Within the template, you have access to all the data provided by the `TemplateData` struct in the [benchmarking CLI -writer](../../utils/frame/benchmarking-cli/src/writer.rs). You can find the default template used -[here](../../utils/frame/benchmarking-cli/src/template.hbs). +writer](../../utils/frame/benchmarking-cli/src/pallet/writer.rs). You can find the default template used +[here](../../utils/frame/benchmarking-cli/src/pallet/template.hbs). There are some custom Handlebars helpers included with our output generation: -- GitLab From 2e4e65711233c6f3a1adc9ce49af8f4537de5439 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Thu, 28 Mar 2024 14:10:56 +0100 Subject: [PATCH 054/128] Export unified ParachainHostFunctions (#3854) This PR exports unified host functions needed for parachains. Basically `SubstrateHostFunctions` + `storage_proof_size::HostFunctions`. Also removes the native executor from the parachain template.
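For illustration, a downstream node can now declare its executor against the bundled alias instead of hand-composing the host-function tuple. A minimal sketch based on the template change in this patch (the alias and `WasmExecutor` both appear in the diffs below; nothing else is new API):

```rust
use cumulus_client_service::ParachainHostFunctions;
use sc_executor::WasmExecutor;

// One alias bundles `sp_io::SubstrateHostFunctions` with the
// PoV-reclaim (storage proof size) host function for parachain nodes.
type ParachainExecutor = WasmExecutor<ParachainHostFunctions>;
```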
--------- Co-authored-by: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> --- Cargo.lock | 1 + cumulus/client/service/Cargo.toml | 1 + cumulus/client/service/src/lib.rs | 9 +++++++ cumulus/polkadot-parachain/src/service.rs | 6 ++--- prdoc/pr_3854.prdoc | 15 +++++++++++ templates/parachain/node/src/service.rs | 31 ++++------------------- 6 files changed, 33 insertions(+), 30 deletions(-) create mode 100644 prdoc/pr_3854.prdoc diff --git a/Cargo.lock b/Cargo.lock index 8188e571fcd..413bd28abe0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3803,6 +3803,7 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-core", + "sp-io", "sp-runtime", "sp-transaction-pool", ] diff --git a/cumulus/client/service/Cargo.toml b/cumulus/client/service/Cargo.toml index 2bafbee951a..e03e20fe5b4 100644 --- a/cumulus/client/service/Cargo.toml +++ b/cumulus/client/service/Cargo.toml @@ -30,6 +30,7 @@ sp-consensus = { path = "../../../substrate/primitives/consensus/common" } sp-core = { path = "../../../substrate/primitives/core" } sp-runtime = { path = "../../../substrate/primitives/runtime" } sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool" } +sp-io = { path = "../../../substrate/primitives/io" } # Polkadot polkadot-primitives = { path = "../../../polkadot/primitives" } diff --git a/cumulus/client/service/src/lib.rs b/cumulus/client/service/src/lib.rs index 950e59aff24..91e884d6f7e 100644 --- a/cumulus/client/service/src/lib.rs +++ b/cumulus/client/service/src/lib.rs @@ -54,6 +54,15 @@ use std::{sync::Arc, time::Duration}; pub use cumulus_primitives_proof_size_hostfunction::storage_proof_size; +/// Host functions that should be used in parachain nodes. +/// +/// Contains the standard substrate host functions, as well as a +/// host function to enable PoV-reclaim on parachain nodes. +pub type ParachainHostFunctions = ( + cumulus_primitives_proof_size_hostfunction::storage_proof_size::HostFunctions, + sp_io::SubstrateHostFunctions, +); + // Given the sporadic nature of the explicit recovery operation and the // possibility to retry infinite times this value is more than enough. // In practice here we expect no more than one queued messages. 
diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs index ddf595ca70c..e9bb5947522 100644 --- a/cumulus/polkadot-parachain/src/service.rs +++ b/cumulus/polkadot-parachain/src/service.rs @@ -69,13 +69,11 @@ use substrate_prometheus_endpoint::Registry; use polkadot_primitives::CollatorPair; #[cfg(not(feature = "runtime-benchmarks"))] -type HostFunctions = - (sp_io::SubstrateHostFunctions, cumulus_client_service::storage_proof_size::HostFunctions); +type HostFunctions = cumulus_client_service::ParachainHostFunctions; #[cfg(feature = "runtime-benchmarks")] type HostFunctions = ( - sp_io::SubstrateHostFunctions, - cumulus_client_service::storage_proof_size::HostFunctions, + cumulus_client_service::ParachainHostFunctions, frame_benchmarking::benchmarking::HostFunctions, ); diff --git a/prdoc/pr_3854.prdoc b/prdoc/pr_3854.prdoc new file mode 100644 index 00000000000..cfc8e246d7e --- /dev/null +++ b/prdoc/pr_3854.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Export unified `ParachainHostFunctions` from `cumulus-client-service` + +doc: + - audience: Node Dev + description: | + Exports `ParachainHostFunctions` to have a bundled version of `SubstrateHostFunctions` and + `cumulus_primitives_proof_size_hostfunction::storage_proof_size::HostFunctions`. This increases discoverability and makes + it more obvious that they should be used together in parachain nodes. + +crates: + - name: cumulus-client-service + bump: minor diff --git a/templates/parachain/node/src/service.rs b/templates/parachain/node/src/service.rs index bb4a5394958..7e7bf1726b5 100644 --- a/templates/parachain/node/src/service.rs +++ b/templates/parachain/node/src/service.rs @@ -16,7 +16,8 @@ use cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImpo use cumulus_client_consensus_proposer::Proposer; use cumulus_client_service::{ build_network, build_relay_chain_interface, prepare_node_config, start_relay_chain_tasks, - BuildNetworkParams, CollatorSybilResistance, DARecoveryProfile, StartRelayChainTasksParams, + BuildNetworkParams, CollatorSybilResistance, DARecoveryProfile, ParachainHostFunctions, + StartRelayChainTasksParams, }; use cumulus_primitives_core::{relay_chain::CollatorPair, ParaId}; use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface}; @@ -25,9 +26,7 @@ use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface}; use frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE; use sc_client_api::Backend; use sc_consensus::ImportQueue; -use sc_executor::{ - HeapAllocStrategy, NativeElseWasmExecutor, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY, -}; +use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY}; use sc_network::NetworkBlock; use sc_network_sync::SyncingService; use sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager}; @@ -36,25 +35,7 @@ use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sp_keystore::KeystorePtr; use substrate_prometheus_endpoint::Registry; -/// Native executor type. 
-pub struct ParachainNativeExecutor; - -impl sc_executor::NativeExecutionDispatch for ParachainNativeExecutor { - type ExtendHostFunctions = ( - cumulus_client_service::storage_proof_size::HostFunctions, - frame_benchmarking::benchmarking::HostFunctions, - ); - - fn dispatch(method: &str, data: &[u8]) -> Option> { - parachain_template_runtime::apis::api::dispatch(method, data) - } - - fn native_version() -> sc_executor::NativeVersion { - parachain_template_runtime::native_version() - } -} - -type ParachainExecutor = NativeElseWasmExecutor; +type ParachainExecutor = WasmExecutor; type ParachainClient = TFullClient; @@ -92,7 +73,7 @@ pub fn new_partial(config: &Configuration) -> Result .default_heap_pages .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static { extra_pages: h as _ }); - let wasm = WasmExecutor::builder() + let executor = ParachainExecutor::builder() .with_execution_method(config.wasm_method) .with_onchain_heap_alloc_strategy(heap_pages) .with_offchain_heap_alloc_strategy(heap_pages) @@ -100,8 +81,6 @@ pub fn new_partial(config: &Configuration) -> Result .with_runtime_cache_size(config.runtime_cache_size) .build(); - let executor = ParachainExecutor::new_with_wasm_executor(wasm); - let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts_record_import::( config, -- GitLab From 79b08d884773443f5f306ee4a95eb435f944ac72 Mon Sep 17 00:00:00 2001 From: dharjeezy Date: Thu, 28 Mar 2024 14:12:14 +0100 Subject: [PATCH 055/128] Try State Hook for Beefy (#3246) Part of: https://github.com/paritytech/polkadot-sdk/issues/239 Polkadot address: 12GyGD3QhT4i2JJpNzvMf96sxxBLWymz4RdGCxRH5Rj5agKW --- prdoc/pr_3246.prdoc | 11 ++ substrate/frame/beefy/src/lib.rs | 62 ++++++++++ substrate/frame/beefy/src/mock.rs | 116 ++++++++++-------- substrate/frame/beefy/src/tests.rs | 190 +++++++++++++++-------------- 4 files changed, 240 insertions(+), 139 deletions(-) create mode 100644 prdoc/pr_3246.prdoc diff --git a/prdoc/pr_3246.prdoc b/prdoc/pr_3246.prdoc new file mode 100644 index 00000000000..19a823e5028 --- /dev/null +++ b/prdoc/pr_3246.prdoc @@ -0,0 +1,11 @@ +title: Try State Hook for Beefy. + +doc: + - audience: Runtime User + description: | + Invariants for storage items in the beefy pallet. Enforces the following Invariants: + 1. `Authorities` should not exceed the `MaxAuthorities` capacity. + 2. `NextAuthorities` should not exceed the `MaxAuthorities` capacity. + 3. `ValidatorSetId` must be present in `SetIdSession`. +crates: +- name: pallet-beefy diff --git a/substrate/frame/beefy/src/lib.rs b/substrate/frame/beefy/src/lib.rs index 87304eba8ba..09cd13ab70a 100644 --- a/substrate/frame/beefy/src/lib.rs +++ b/substrate/frame/beefy/src/lib.rs @@ -280,6 +280,14 @@ pub mod pallet { } } + #[pallet::hooks] + impl Hooks> for Pallet { + #[cfg(feature = "try-runtime")] + fn try_state(_n: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { + Self::do_try_state() + } + } + #[pallet::validate_unsigned] impl ValidateUnsigned for Pallet { type Call = Call; @@ -294,6 +302,60 @@ pub mod pallet { } } +#[cfg(any(feature = "try-runtime", test))] +impl Pallet { + /// Ensure the correctness of the state of this pallet. + /// + /// This should be valid before or after each state transition of this pallet. + pub fn do_try_state() -> Result<(), sp_runtime::TryRuntimeError> { + Self::try_state_authorities()?; + Self::try_state_validators()?; + + Ok(()) + } + + /// # Invariants + /// + /// * `Authorities` should not exceed the `MaxAuthorities` capacity. 
+ /// * `NextAuthorities` should not exceed the `MaxAuthorities` capacity. + fn try_state_authorities() -> Result<(), sp_runtime::TryRuntimeError> { + if let Some(authorities_len) = >::decode_len() { + ensure!( + authorities_len as u32 <= T::MaxAuthorities::get(), + "Authorities number exceeds what the pallet config allows." + ); + } else { + return Err(sp_runtime::TryRuntimeError::Other( + "Failed to decode length of authorities", + )); + } + + if let Some(next_authorities_len) = >::decode_len() { + ensure!( + next_authorities_len as u32 <= T::MaxAuthorities::get(), + "Next authorities number exceeds what the pallet config allows." + ); + } else { + return Err(sp_runtime::TryRuntimeError::Other( + "Failed to decode length of next authorities", + )); + } + Ok(()) + } + + /// # Invariants + /// + /// `ValidatorSetId` must be present in `SetIdSession` + fn try_state_validators() -> Result<(), sp_runtime::TryRuntimeError> { + let validator_set_id = >::get(); + ensure!( + SetIdSession::::get(validator_set_id).is_some(), + "Validator set id must be present in SetIdSession" + ); + Ok(()) + } +} + impl Pallet { /// Return the current active BEEFY validator set. pub fn validator_set() -> Option> { diff --git a/substrate/frame/beefy/src/mock.rs b/substrate/frame/beefy/src/mock.rs index fccc63bd1b4..1c55adc8de4 100644 --- a/substrate/frame/beefy/src/mock.rs +++ b/substrate/frame/beefy/src/mock.rs @@ -27,7 +27,6 @@ use frame_support::{ }; use pallet_session::historical as pallet_session_historical; use sp_core::{crypto::KeyTypeId, ConstU128}; -use sp_io::TestExternalities; use sp_runtime::{ app_crypto::ecdsa::Public, curve::PiecewiseLinear, impl_opaque_keys, testing::TestXt, traits::OpaqueKeys, BuildStorage, Perbill, @@ -210,6 +209,73 @@ impl pallet_offences::Config for Test { type OnOffenceHandler = Staking; } +#[derive(Default)] +pub struct ExtBuilder { + authorities: Vec, +} + +impl ExtBuilder { + /// Add some AccountIds to insert into `List`. + #[cfg(test)] + pub(crate) fn add_authorities(mut self, ids: Vec) -> Self { + self.authorities = ids; + self + } + + pub fn build(self) -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + + let balances: Vec<_> = + (0..self.authorities.len()).map(|i| (i as u64, 10_000_000)).collect(); + + pallet_balances::GenesisConfig:: { balances } + .assimilate_storage(&mut t) + .unwrap(); + + let session_keys: Vec<_> = self + .authorities + .iter() + .enumerate() + .map(|(i, k)| (i as u64, i as u64, MockSessionKeys { dummy: k.clone() })) + .collect(); + + BasicExternalities::execute_with_storage(&mut t, || { + for (ref id, ..) 
in &session_keys { + frame_system::Pallet::::inc_providers(id); + } + }); + + pallet_session::GenesisConfig:: { keys: session_keys } + .assimilate_storage(&mut t) + .unwrap(); + + // controllers are same as stash + let stakers: Vec<_> = (0..self.authorities.len()) + .map(|i| (i as u64, i as u64, 10_000, pallet_staking::StakerStatus::::Validator)) + .collect(); + + let staking_config = pallet_staking::GenesisConfig:: { + stakers, + validator_count: 2, + force_era: pallet_staking::Forcing::ForceNew, + minimum_validator_count: 0, + invulnerables: vec![], + ..Default::default() + }; + + staking_config.assimilate_storage(&mut t).unwrap(); + + t.into() + } + + pub fn build_and_execute(self, test: impl FnOnce() -> ()) { + self.build().execute_with(|| { + test(); + Beefy::do_try_state().expect("All invariants must hold after a test"); + }) + } +} + // Note, that we can't use `UintAuthorityId` here. Reason is that the implementation // of `to_public_key()` assumes, that a public key is 32 bytes long. This is true for // ed25519 and sr25519 but *not* for ecdsa. A compressed ecdsa public key is 33 bytes, @@ -226,54 +292,6 @@ pub fn mock_authorities(vec: Vec) -> Vec { vec.into_iter().map(|id| mock_beefy_id(id)).collect() } -pub fn new_test_ext(ids: Vec) -> TestExternalities { - new_test_ext_raw_authorities(mock_authorities(ids)) -} - -pub fn new_test_ext_raw_authorities(authorities: Vec) -> TestExternalities { - let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - - let balances: Vec<_> = (0..authorities.len()).map(|i| (i as u64, 10_000_000)).collect(); - - pallet_balances::GenesisConfig:: { balances } - .assimilate_storage(&mut t) - .unwrap(); - - let session_keys: Vec<_> = authorities - .iter() - .enumerate() - .map(|(i, k)| (i as u64, i as u64, MockSessionKeys { dummy: k.clone() })) - .collect(); - - BasicExternalities::execute_with_storage(&mut t, || { - for (ref id, ..) 
in &session_keys { - frame_system::Pallet::::inc_providers(id); - } - }); - - pallet_session::GenesisConfig:: { keys: session_keys } - .assimilate_storage(&mut t) - .unwrap(); - - // controllers are same as stash - let stakers: Vec<_> = (0..authorities.len()) - .map(|i| (i as u64, i as u64, 10_000, pallet_staking::StakerStatus::::Validator)) - .collect(); - - let staking_config = pallet_staking::GenesisConfig:: { - stakers, - validator_count: 2, - force_era: pallet_staking::Forcing::ForceNew, - minimum_validator_count: 0, - invulnerables: vec![], - ..Default::default() - }; - - staking_config.assimilate_storage(&mut t).unwrap(); - - t.into() -} - pub fn start_session(session_index: SessionIndex) { for i in Session::current_index()..session_index { System::on_finalize(System::block_number()); diff --git a/substrate/frame/beefy/src/tests.rs b/substrate/frame/beefy/src/tests.rs index 2950264e0c3..6a6aa245ce1 100644 --- a/substrate/frame/beefy/src/tests.rs +++ b/substrate/frame/beefy/src/tests.rs @@ -47,7 +47,7 @@ fn genesis_session_initializes_authorities() { let authorities = mock_authorities(vec![1, 2, 3, 4]); let want = authorities.clone(); - new_test_ext_raw_authorities(authorities).execute_with(|| { + ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { let authorities = beefy::Authorities::::get(); assert_eq!(authorities.len(), 4); @@ -69,130 +69,140 @@ fn session_change_updates_authorities() { let authorities = mock_authorities(vec![1, 2, 3, 4]); let want_validators = authorities.clone(); - new_test_ext(vec![1, 2, 3, 4]).execute_with(|| { - assert!(0 == beefy::ValidatorSetId::::get()); + ExtBuilder::default() + .add_authorities(mock_authorities(vec![1, 2, 3, 4])) + .build_and_execute(|| { + assert!(0 == beefy::ValidatorSetId::::get()); - init_block(1); + init_block(1); - assert!(1 == beefy::ValidatorSetId::::get()); + assert!(1 == beefy::ValidatorSetId::::get()); - let want = beefy_log(ConsensusLog::AuthoritiesChange( - ValidatorSet::new(want_validators, 1).unwrap(), - )); + let want = beefy_log(ConsensusLog::AuthoritiesChange( + ValidatorSet::new(want_validators, 1).unwrap(), + )); - let log = System::digest().logs[0].clone(); - assert_eq!(want, log); + let log = System::digest().logs[0].clone(); + assert_eq!(want, log); - init_block(2); + init_block(2); - assert!(2 == beefy::ValidatorSetId::::get()); + assert!(2 == beefy::ValidatorSetId::::get()); - let want = beefy_log(ConsensusLog::AuthoritiesChange( - ValidatorSet::new(vec![mock_beefy_id(2), mock_beefy_id(4)], 2).unwrap(), - )); + let want = beefy_log(ConsensusLog::AuthoritiesChange( + ValidatorSet::new(vec![mock_beefy_id(2), mock_beefy_id(4)], 2).unwrap(), + )); - let log = System::digest().logs[1].clone(); - assert_eq!(want, log); - }); + let log = System::digest().logs[1].clone(); + assert_eq!(want, log); + }); } #[test] fn session_change_updates_next_authorities() { let want = vec![mock_beefy_id(1), mock_beefy_id(2), mock_beefy_id(3), mock_beefy_id(4)]; - new_test_ext(vec![1, 2, 3, 4]).execute_with(|| { - let next_authorities = beefy::NextAuthorities::::get(); + ExtBuilder::default() + .add_authorities(mock_authorities(vec![1, 2, 3, 4])) + .build_and_execute(|| { + let next_authorities = beefy::NextAuthorities::::get(); - assert_eq!(next_authorities.len(), 4); - assert_eq!(want[0], next_authorities[0]); - assert_eq!(want[1], next_authorities[1]); - assert_eq!(want[2], next_authorities[2]); - assert_eq!(want[3], next_authorities[3]); + assert_eq!(next_authorities.len(), 4); + assert_eq!(want[0], 
next_authorities[0]); + assert_eq!(want[1], next_authorities[1]); + assert_eq!(want[2], next_authorities[2]); + assert_eq!(want[3], next_authorities[3]); - init_block(1); + init_block(1); - let next_authorities = beefy::NextAuthorities::::get(); + let next_authorities = beefy::NextAuthorities::::get(); - assert_eq!(next_authorities.len(), 2); - assert_eq!(want[1], next_authorities[0]); - assert_eq!(want[3], next_authorities[1]); - }); + assert_eq!(next_authorities.len(), 2); + assert_eq!(want[1], next_authorities[0]); + assert_eq!(want[3], next_authorities[1]); + }); } #[test] fn validator_set_at_genesis() { let want = vec![mock_beefy_id(1), mock_beefy_id(2)]; - new_test_ext(vec![1, 2, 3, 4]).execute_with(|| { - let vs = Beefy::validator_set().unwrap(); + ExtBuilder::default() + .add_authorities(mock_authorities(vec![1, 2, 3, 4])) + .build_and_execute(|| { + let vs = Beefy::validator_set().unwrap(); - assert_eq!(vs.id(), 0u64); - assert_eq!(vs.validators()[0], want[0]); - assert_eq!(vs.validators()[1], want[1]); - }); + assert_eq!(vs.id(), 0u64); + assert_eq!(vs.validators()[0], want[0]); + assert_eq!(vs.validators()[1], want[1]); + }); } #[test] fn validator_set_updates_work() { let want = vec![mock_beefy_id(1), mock_beefy_id(2), mock_beefy_id(3), mock_beefy_id(4)]; - new_test_ext(vec![1, 2, 3, 4]).execute_with(|| { - let vs = Beefy::validator_set().unwrap(); - assert_eq!(vs.id(), 0u64); - assert_eq!(want[0], vs.validators()[0]); - assert_eq!(want[1], vs.validators()[1]); - assert_eq!(want[2], vs.validators()[2]); - assert_eq!(want[3], vs.validators()[3]); + ExtBuilder::default() + .add_authorities(mock_authorities(vec![1, 2, 3, 4])) + .build_and_execute(|| { + let vs = Beefy::validator_set().unwrap(); + assert_eq!(vs.id(), 0u64); + assert_eq!(want[0], vs.validators()[0]); + assert_eq!(want[1], vs.validators()[1]); + assert_eq!(want[2], vs.validators()[2]); + assert_eq!(want[3], vs.validators()[3]); - init_block(1); + init_block(1); - let vs = Beefy::validator_set().unwrap(); + let vs = Beefy::validator_set().unwrap(); - assert_eq!(vs.id(), 1u64); - assert_eq!(want[0], vs.validators()[0]); - assert_eq!(want[1], vs.validators()[1]); + assert_eq!(vs.id(), 1u64); + assert_eq!(want[0], vs.validators()[0]); + assert_eq!(want[1], vs.validators()[1]); - init_block(2); + init_block(2); - let vs = Beefy::validator_set().unwrap(); + let vs = Beefy::validator_set().unwrap(); - assert_eq!(vs.id(), 2u64); - assert_eq!(want[1], vs.validators()[0]); - assert_eq!(want[3], vs.validators()[1]); - }); + assert_eq!(vs.id(), 2u64); + assert_eq!(want[1], vs.validators()[0]); + assert_eq!(want[3], vs.validators()[1]); + }); } #[test] fn cleans_up_old_set_id_session_mappings() { - new_test_ext(vec![1, 2, 3, 4]).execute_with(|| { - let max_set_id_session_entries = MaxSetIdSessionEntries::get(); - - // we have 3 sessions per era - let era_limit = max_set_id_session_entries / 3; - // sanity check against division precision loss - assert_eq!(0, max_set_id_session_entries % 3); - // go through `max_set_id_session_entries` sessions - start_era(era_limit); - - // we should have a session id mapping for all the set ids from - // `max_set_id_session_entries` eras we have observed - for i in 1..=max_set_id_session_entries { - assert!(beefy::SetIdSession::::get(i as u64).is_some()); - } + ExtBuilder::default() + .add_authorities(mock_authorities(vec![1, 2, 3, 4])) + .build_and_execute(|| { + let max_set_id_session_entries = MaxSetIdSessionEntries::get(); + + // we have 3 sessions per era + let era_limit = 
max_set_id_session_entries / 3; + // sanity check against division precision loss + assert_eq!(0, max_set_id_session_entries % 3); + // go through `max_set_id_session_entries` sessions + start_era(era_limit); + + // we should have a session id mapping for all the set ids from + // `max_set_id_session_entries` eras we have observed + for i in 1..=max_set_id_session_entries { + assert!(beefy::SetIdSession::::get(i as u64).is_some()); + } - // go through another `max_set_id_session_entries` sessions - start_era(era_limit * 2); + // go through another `max_set_id_session_entries` sessions + start_era(era_limit * 2); - // we should keep tracking the new mappings for new sessions - for i in max_set_id_session_entries + 1..=max_set_id_session_entries * 2 { - assert!(beefy::SetIdSession::::get(i as u64).is_some()); - } + // we should keep tracking the new mappings for new sessions + for i in max_set_id_session_entries + 1..=max_set_id_session_entries * 2 { + assert!(beefy::SetIdSession::::get(i as u64).is_some()); + } - // but the old ones should have been pruned by now - for i in 1..=max_set_id_session_entries { - assert!(beefy::SetIdSession::::get(i as u64).is_none()); - } - }); + // but the old ones should have been pruned by now + for i in 1..=max_set_id_session_entries { + assert!(beefy::SetIdSession::::get(i as u64).is_none()); + } + }); } /// Returns a list with 3 authorities with known keys: @@ -259,7 +269,7 @@ fn should_sign_and_verify() { fn report_equivocation_current_set_works() { let authorities = test_authorities(); - new_test_ext_raw_authorities(authorities).execute_with(|| { + ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { assert_eq!(Staking::current_era(), Some(0)); assert_eq!(Session::current_index(), 0); @@ -339,7 +349,7 @@ fn report_equivocation_current_set_works() { fn report_equivocation_old_set_works() { let authorities = test_authorities(); - new_test_ext_raw_authorities(authorities).execute_with(|| { + ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { start_era(1); let block_num = System::block_number(); @@ -422,7 +432,7 @@ fn report_equivocation_old_set_works() { fn report_equivocation_invalid_set_id() { let authorities = test_authorities(); - new_test_ext_raw_authorities(authorities).execute_with(|| { + ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { start_era(1); let block_num = System::block_number(); @@ -460,7 +470,7 @@ fn report_equivocation_invalid_set_id() { fn report_equivocation_invalid_session() { let authorities = test_authorities(); - new_test_ext_raw_authorities(authorities).execute_with(|| { + ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { start_era(1); let block_num = System::block_number(); @@ -503,7 +513,7 @@ fn report_equivocation_invalid_session() { fn report_equivocation_invalid_key_owner_proof() { let authorities = test_authorities(); - new_test_ext_raw_authorities(authorities).execute_with(|| { + ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { start_era(1); let block_num = System::block_number(); @@ -551,7 +561,7 @@ fn report_equivocation_invalid_key_owner_proof() { fn report_equivocation_invalid_equivocation_proof() { let authorities = test_authorities(); - new_test_ext_raw_authorities(authorities).execute_with(|| { + ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { start_era(1); let block_num = System::block_number(); @@ -624,7 +634,7 @@ fn 
report_equivocation_validate_unsigned_prevents_duplicates() { let authorities = test_authorities(); - new_test_ext_raw_authorities(authorities).execute_with(|| { + ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { start_era(1); let block_num = System::block_number(); @@ -728,7 +738,7 @@ fn report_equivocation_has_valid_weight() { fn valid_equivocation_reports_dont_pay_fees() { let authorities = test_authorities(); - new_test_ext_raw_authorities(authorities).execute_with(|| { + ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { start_era(1); let block_num = System::block_number(); @@ -796,7 +806,7 @@ fn valid_equivocation_reports_dont_pay_fees() { fn set_new_genesis_works() { let authorities = test_authorities(); - new_test_ext_raw_authorities(authorities).execute_with(|| { + ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { start_era(1); let new_genesis_delay = 10u64; -- GitLab From 6a0859ebf70db45e632219d95e932ea824488312 Mon Sep 17 00:00:00 2001 From: Alin Dima Date: Thu, 28 Mar 2024 15:14:17 +0200 Subject: [PATCH 056/128] bugfix: request fragment tree membership for all candidates (#3874) --- polkadot/node/core/backing/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 4b6beb5592e..b5cad4cf5f0 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -911,7 +911,7 @@ async fn handle_active_leaves_update( } let mut seconded_at_depth = HashMap::new(); - if let Some(response) = membership_answers.next().await { + while let Some(response) = membership_answers.next().await { match response { Err(oneshot::Canceled) => { gum::warn!( -- GitLab From c106dbd095cddb3254ab85eca99bdaa997db2ca3 Mon Sep 17 00:00:00 2001 From: tugy <33746108+tugytur@users.noreply.github.com> Date: Thu, 28 Mar 2024 15:07:52 +0100 Subject: [PATCH 057/128] add missing syscalls for workers (#2212) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Description Since the binary split, additional syscalls related to the workers are getting blocked. With the hardened systemd file it shows the following warning: ``` Cannot fully enable landlock, a Linux kernel security feature. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider upgrading the kernel version for maximum security.
status=Ok(NotEnforced) abi=1 ``` For it to work, we additionally need to allow: - mount - umount2 - pivot_root and set `RestrictNamespaces=false`. Added the new line `SystemCallFilter=pivot_root` because otherwise it would get blocked by `~@privileged`. Co-authored-by: s0me0ne-unkn0wn <48632512+s0me0ne-unkn0wn@users.noreply.github.com> Co-authored-by: Bastian Köcher --- polkadot/scripts/packaging/polkadot.service | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/polkadot/scripts/packaging/polkadot.service b/polkadot/scripts/packaging/polkadot.service index 7fb549c97f8..8c5a483d424 100644 --- a/polkadot/scripts/packaging/polkadot.service +++ b/polkadot/scripts/packaging/polkadot.service @@ -25,12 +25,13 @@ ProtectKernelTunables=true ProtectSystem=strict RemoveIPC=true RestrictAddressFamilies=AF_INET AF_INET6 AF_NETLINK AF_UNIX -RestrictNamespaces=true +RestrictNamespaces=false RestrictSUIDSGID=true SystemCallArchitectures=native SystemCallFilter=@system-service -SystemCallFilter=landlock_add_rule landlock_create_ruleset landlock_restrict_self seccomp -SystemCallFilter=~@clock @module @mount @reboot @swap @privileged +SystemCallFilter=landlock_add_rule landlock_create_ruleset landlock_restrict_self seccomp mount umount2 +SystemCallFilter=~@clock @module @reboot @swap @privileged +SystemCallFilter=pivot_root UMask=0027 [Install] -- GitLab From eb6f5abee64e979dba25924f71ef86d2b3ca2deb Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Thu, 28 Mar 2024 17:42:20 +0100 Subject: [PATCH 058/128] [ci] fix subsystem-benchmarks gha (#3876) The PR adds validation of the input variables and app credentials for pushing into gh-pages cc https://github.com/paritytech/ci_cd/issues/934 --- .github/workflows/subsystem-benchmarks.yml | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/.github/workflows/subsystem-benchmarks.yml b/.github/workflows/subsystem-benchmarks.yml index 37a9e0f4680..f0d56bf6e9d 100644 --- a/.github/workflows/subsystem-benchmarks.yml +++ b/.github/workflows/subsystem-benchmarks.yml @@ -15,7 +15,13 @@ on: jobs: subsystem-benchmarks: runs-on: ubuntu-latest + environment: subsystem-benchmarks steps: + - name: Validate inputs + run: | + echo "${{ github.event.inputs.benchmark-data-dir-path }}" | grep -P '^[a-z\-]' + echo "${{ github.event.inputs.output-file-path }}" | grep -P '^[a-z\-]+\.json' + - name: Checkout Sources uses: actions/checkout@v4.1.2 with: @@ -30,7 +36,13 @@ jobs: - name: Switch branch id: step_two run: | - git checkout master + git checkout master -- + + - uses: actions/create-github-app-token@v1 + id: app-token + with: + app-id: ${{ secrets.POLKADOTSDK_GHPAGES_APP_ID }} + private-key: ${{ secrets.POLKADOTSDK_GHPAGES_APP_KEY }} - name: Store benchmark result uses: benchmark-action/github-action-benchmark@v1 with: tool: "customSmallerIsBetter" output-file-path: ${{ github.event.inputs.output-file-path }} benchmark-data-dir-path: "bench/${{ github.event.inputs.benchmark-data-dir-path }}" - github-token: ${{ secrets.GITHUB_TOKEN }} + github-token: ${{ steps.app-token.outputs.token }} auto-push: true -- GitLab From 30ef8651ed0ba821e59121545815280a3e9b2862 Mon Sep 17 00:00:00 2001 From: Andrei Sandu <54316454+sandreim@users.noreply.github.com> Date: Fri, 29 Mar 2024 09:49:34 +0200 Subject: [PATCH 059/128] collation-generation: fix tests (#3883) Somehow https://github.com/paritytech/polkadot-sdk/pull/3795 was merged, but tests are failing now on master.
I suspect that CI is not even running these tests anymore, which is a big issue. --------- Signed-off-by: Andrei Sandu --- .../node/collation-generation/src/tests.rs | 96 +++++++++---------- 1 file changed, 46 insertions(+), 50 deletions(-) diff --git a/polkadot/node/collation-generation/src/tests.rs b/polkadot/node/collation-generation/src/tests.rs index 3cb3e61a35a..923a21e86fb 100644 --- a/polkadot/node/collation-generation/src/tests.rs +++ b/polkadot/node/collation-generation/src/tests.rs @@ -686,7 +686,7 @@ fn submit_collation_is_no_op_before_initialization() { fn submit_collation_leads_to_distribution() { let relay_parent = Hash::repeat_byte(0); let validation_code_hash = ValidationCodeHash::from(Hash::repeat_byte(42)); - let parent_head = HeadData::from(vec![1, 2, 3]); + let parent_head = dummy_head_data(); let para_id = ParaId::from(5); let expected_pvd = PersistedValidationData { parent_head: parent_head.clone(), @@ -707,7 +707,7 @@ fn submit_collation_leads_to_distribution() { msg: CollationGenerationMessage::SubmitCollation(SubmitCollationParams { relay_parent, collation: test_collation(), - parent_head: vec![1, 2, 3].into(), + parent_head: dummy_head_data(), validation_code_hash, result_sender: None, core_index: CoreIndex(0), @@ -795,16 +795,16 @@ fn distribute_collation_for_occupied_core_with_async_backing_enabled(#[case] run cores, runtime_version, claim_queue, - pending_availability, ) .await; - helpers::handle_core_processing_for_a_leaf( + helpers::handle_cores_processing_for_a_leaf( &mut virtual_overseer, activated_hash, para_id, // `CoreState` is `Occupied` => `OccupiedCoreAssumption` is `Included` OccupiedCoreAssumption::Included, 1, + pending_availability, ) .await; @@ -825,7 +825,7 @@ fn distribute_collation_for_occupied_cores_with_async_backing_enabled_and_elasti let activated_hash: Hash = [1; 32].into(); let para_id = ParaId::from(5); - let cores = (0..candidates_pending_avail) + let cores = (0..3) .into_iter() .map(|idx| { CoreState::Occupied(polkadot_primitives::OccupiedCore { @@ -864,17 +864,22 @@ fn distribute_collation_for_occupied_cores_with_async_backing_enabled_and_elasti // Using latest runtime with the fancy claim queue exposed. RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT, claim_queue, - pending_availability, ) .await; - helpers::handle_core_processing_for_a_leaf( + helpers::handle_cores_processing_for_a_leaf( &mut virtual_overseer, activated_hash, para_id, - // `CoreState` is `Occupied` => `OccupiedCoreAssumption` is `Included` - OccupiedCoreAssumption::Included, + // if at least 1 core is occupied => `OccupiedCoreAssumption` is `Included` + // else assumption is `Free`. 
+ if candidates_pending_avail > 0 { + OccupiedCoreAssumption::Included + } else { + OccupiedCoreAssumption::Free + }, total_cores, + pending_availability, ) .await; @@ -890,12 +895,12 @@ fn distribute_collation_for_occupied_cores_with_async_backing_enabled_and_elasti #[case(1)] #[case(2)] fn distribute_collation_for_free_cores_with_async_backing_enabled_and_elastic_scaling( - #[case] candidates_pending_avail: u32, + #[case] total_cores: usize, ) { let activated_hash: Hash = [1; 32].into(); let para_id = ParaId::from(5); - let cores = (0..candidates_pending_avail) + let cores = (0..total_cores) .into_iter() .map(|_idx| CoreState::Scheduled(ScheduledCore { para_id, collator: None })) .collect::<Vec<_>>(); @@ -905,7 +910,6 @@ fn distribute_collation_for_free_cores_with_async_backing_enabled_and_elastic_sc .enumerate() .map(|(idx, _core)| (CoreIndex::from(idx as u32), VecDeque::from([para_id]))) .collect::<BTreeMap<_, _>>(); - let total_cores = cores.len(); test_harness(|mut virtual_overseer| async move { helpers::initialize_collator(&mut virtual_overseer, para_id).await; @@ -918,17 +922,17 @@ fn distribute_collation_for_free_cores_with_async_backing_enabled_and_elastic_sc // Using latest runtime with the fancy claim queue exposed. RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT, claim_queue, - vec![], ) .await; - helpers::handle_core_processing_for_a_leaf( + helpers::handle_cores_processing_for_a_leaf( &mut virtual_overseer, activated_hash, para_id, // `CoreState` is `Free` => `OccupiedCoreAssumption` is `Free` OccupiedCoreAssumption::Free, total_cores, + vec![], ) .await; @@ -963,6 +967,7 @@ fn no_collation_is_distributed_for_occupied_core_with_async_backing_disabled( test_harness(|mut virtual_overseer| async move { helpers::initialize_collator(&mut virtual_overseer, para_id).await; helpers::activate_new_head(&mut virtual_overseer, activated_hash).await; + helpers::handle_runtime_calls_on_new_head_activation( &mut virtual_overseer, activated_hash, @@ -970,7 +975,6 @@ fn no_collation_is_distributed_for_occupied_core_with_async_backing_disabled( cores, runtime_version, claim_queue, - vec![], ) .await; @@ -1047,7 +1051,6 @@ mod helpers { cores: Vec<CoreState>, runtime_version: u32, claim_queue: BTreeMap<CoreIndex, VecDeque<ParaId>>, - pending_availability: Vec<CandidatePendingAvailability>, ) { assert_matches!( overseer_recv(virtual_overseer).await, @@ -1082,25 +1085,6 @@ mod helpers { } ); - // Process the `ParaBackingState` message, and return some dummy state. 
- let message = overseer_recv(virtual_overseer).await; - let para_id = match message { - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _, - RuntimeApiRequest::ParaBackingState(p_id, _), - )) => p_id, - _ => panic!("received unexpected message {:?}", message), - }; - - assert_matches!( - message, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::ParaBackingState(p_id, tx)) - ) if parent == activated_hash && p_id == para_id => { - tx.send(Ok(Some(dummy_backing_state(pending_availability)))).unwrap(); - } - ); - assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( @@ -1128,12 +1112,13 @@ mod helpers { // Handles all runtime requests performed in `handle_new_activations` for the case when a // collation should be prepared for the new leaf - pub async fn handle_core_processing_for_a_leaf( + pub async fn handle_cores_processing_for_a_leaf( virtual_overseer: &mut VirtualOverseer, activated_hash: Hash, para_id: ParaId, expected_occupied_core_assumption: OccupiedCoreAssumption, cores_assigned: usize, + pending_availability: Vec<CandidatePendingAvailability>, ) { // Expect no messages if no cores is assigned to the para if cores_assigned == 0 { @@ -1143,7 +1128,7 @@ mod helpers { // Some hardcoded data - if needed, extract to parameters let validation_code_hash = ValidationCodeHash::from(Hash::repeat_byte(42)); - let parent_head = HeadData::from(vec![1, 2, 3]); + let parent_head = dummy_head_data(); let pvd = PersistedValidationData { parent_head: parent_head.clone(), relay_parent_number: 10, @@ -1151,6 +1136,15 @@ mod helpers { max_pov_size: 1024, }; + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ParaBackingState(p_id, tx)) + ) if parent == activated_hash && p_id == para_id => { + tx.send(Ok(Some(dummy_backing_state(pending_availability)))).unwrap(); + } + ); + assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request(hash, RuntimeApiRequest::PersistedValidationData(id, a, tx))) => { @@ -1180,18 +1174,20 @@ mod helpers { } ); - assert_matches!( - overseer_recv(virtual_overseer).await, - AllMessages::CollatorProtocol(CollatorProtocolMessage::DistributeCollation{ - candidate_receipt, - parent_head_data_hash, - .. 
+ }) => { + assert_eq!(parent_head_data_hash, parent_head.hash()); + assert_eq!(candidate_receipt.descriptor().persisted_validation_data_hash, pvd.hash()); + assert_eq!(candidate_receipt.descriptor().para_head, dummy_head_data().hash()); + assert_eq!(candidate_receipt.descriptor().validation_code_hash, validation_code_hash); + } + ); + } } } -- GitLab From b310b575cd73928cf061e1ae0d184f7e900976d5 Mon Sep 17 00:00:00 2001 From: s0me0ne-unkn0wn <48632512+s0me0ne-unkn0wn@users.noreply.github.com> Date: Fri, 29 Mar 2024 10:12:40 +0100 Subject: [PATCH 060/128] Remove transient code after `im-online` pallet removal (#3383) Removes transient code introduced to clean up the offchain database after `im-online` pallet removal. Should be merged after #2290 has been enacted. --- polkadot/runtime/rococo/src/lib.rs | 6 ------ polkadot/runtime/westend/src/lib.rs | 6 ------ 2 files changed, 12 deletions(-) diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index c41ffdbe72d..f37c901475a 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -1868,12 +1868,6 @@ sp_api::impl_runtime_apis! { impl offchain_primitives::OffchainWorkerApi<Block> for Runtime { fn offchain_worker(header: &<Block as BlockT>::Header) { - use sp_runtime::{traits::Header, DigestItem}; - - if header.digest().logs().iter().any(|di| di == &DigestItem::RuntimeEnvironmentUpdated) { - pallet_im_online::migration::clear_offchain_storage(Session::validators().len() as u32); - } - Executive::offchain_worker(header) } } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index e6381513170..d75c1011d5f 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1931,12 +1931,6 @@ sp_api::impl_runtime_apis! { impl offchain_primitives::OffchainWorkerApi<Block> for Runtime { fn offchain_worker(header: &<Block as BlockT>::Header) { - use sp_runtime::{traits::Header, DigestItem}; - - if header.digest().logs().iter().any(|di| di == &DigestItem::RuntimeEnvironmentUpdated) { - pallet_im_online::migration::clear_offchain_storage(Session::validators().len() as u32); - } - Executive::offchain_worker(header) } } -- GitLab From 5638d1a830dc70f56e5fdd7eded21a4f592d382c Mon Sep 17 00:00:00 2001 From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com> Date: Fri, 29 Mar 2024 13:24:26 +0200 Subject: [PATCH 061/128] Decorate mpsc-notification-to-protocol with the protocol name (#3873) Currently, all protocols use the same metric name for `mpsc-notification-to-protocol`. This is bad because we can't actually tell which protocol might cause problems. This patch proposes we derive the name of the metric from the protocol name, so that we have separate metrics for each protocol and properly detect which one is having problems processing its messages. 
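For reference, here is a self-contained sketch of the label derivation the new `metric_label_for_protocol` helper performs. It is written against a plain `&str` instead of `sc-network`'s `ProtocolName` so it runs standalone, and the `/ksmcc3/...` protocol name is only an illustrative input, not taken from this patch:

```rust
// Standalone sketch of the metric label derivation. Note that because the
// fold walks the reversed tokens, the protocol version ends up in the label
// before the protocol name.
fn metric_label_for_protocol(protocol_name: &str) -> String {
    let keys = protocol_name.split('/').collect::<Vec<_>>();
    keys.iter()
        .rev()
        .take(2) // last two tokens: protocol version, then protocol name
        .fold("mpsc-notification-to-protocol".to_string(), |acc, val| {
            format!("{}-{}", acc, val)
        })
}

fn main() {
    let label = metric_label_for_protocol("/ksmcc3/block-announces/1");
    assert_eq!(label, "mpsc-notification-to-protocol-1-block-announces");
}
```
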
--------- Signed-off-by: Alexandru Gheorghe --- .../src/protocol/notifications/service/mod.rs | 18 ++++++++++++++++-- substrate/client/utils/src/mpsc.rs | 7 ++++++- 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/substrate/client/network/src/protocol/notifications/service/mod.rs b/substrate/client/network/src/protocol/notifications/service/mod.rs index 6d9873f45d5..dfb19daa28e 100644 --- a/substrate/client/network/src/protocol/notifications/service/mod.rs +++ b/substrate/client/network/src/protocol/notifications/service/mod.rs @@ -338,7 +338,8 @@ impl NotificationService for NotificationHandle { // Clone [`NotificationService`] fn clone(&mut self) -> Result<Box<dyn NotificationService>, ()> { let mut subscribers = self.subscribers.lock(); - let (event_tx, event_rx) = tracing_unbounded("mpsc-notification-to-protocol", 100_000); + + let (event_tx, event_rx) = tracing_unbounded(self.rx.name(), 100_000); subscribers.push(event_tx); Ok(Box::new(NotificationHandle { @@ -624,7 +625,9 @@ pub fn notification_service( protocol: ProtocolName, ) -> (ProtocolHandlePair, Box<dyn NotificationService>) { let (cmd_tx, cmd_rx) = mpsc::channel(COMMAND_QUEUE_SIZE); - let (event_tx, event_rx) = tracing_unbounded("mpsc-notification-to-protocol", 100_000); + + let (event_tx, event_rx) = + tracing_unbounded(metric_label_for_protocol(&protocol).leak(), 100_000); let subscribers = Arc::new(Mutex::new(vec![event_tx])); ( @@ -632,3 +635,14 @@ pub fn notification_service( Box::new(NotificationHandle::new(protocol.clone(), cmd_tx, event_rx, subscribers)), ) } + +// Decorates the mpsc-notification-to-protocol metric with the name of the protocol, +// to be able to distinguish between different protocols in dashboards. +fn metric_label_for_protocol(protocol: &ProtocolName) -> String { + let protocol_name = protocol.to_string(); + let keys = protocol_name.split("/").collect::<Vec<_>>(); + keys.iter() + .rev() + .take(2) // Last two tokens give the protocol name and version + .fold("mpsc-notification-to-protocol".into(), |acc, val| format!("{}-{}", acc, val)) +} diff --git a/substrate/client/utils/src/mpsc.rs b/substrate/client/utils/src/mpsc.rs index c24a5bd8904..91db7e1e7b0 100644 --- a/substrate/client/utils/src/mpsc.rs +++ b/substrate/client/utils/src/mpsc.rs @@ -86,7 +86,7 @@ pub fn tracing_unbounded<T>( warning_fired: Arc::new(AtomicBool::new(false)), creation_backtrace: Arc::new(Backtrace::force_capture()), }; - let receiver = TracingUnboundedReceiver { inner: r, name }; + let receiver = TracingUnboundedReceiver { inner: r, name: name.into() }; (sender, receiver) } @@ -157,6 +157,11 @@ impl<T> TracingUnboundedReceiver<T> { pub fn len(&self) -> usize { self.inner.len() } + + /// The name of this receiver + pub fn name(&self) -> &'static str { + self.name + } } impl<T> Drop for TracingUnboundedReceiver<T> { -- GitLab From 0d9324847391e902bb42f84f0e76096b1f764efe Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Fri, 29 Mar 2024 15:13:21 +0200 Subject: [PATCH 062/128] Fix `addresses_to_publish_respects_existing_p2p_protocol` test in sc-authority-discovery (#3895) Fixes https://github.com/paritytech/polkadot-sdk/issues/3887. 
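The gist of the change on the worker side: before an external address is published, any trailing `/p2p/<peer-id>` component must now match the local peer id and is then stripped. A dependency-free sketch of that check, with plain strings standing in for libp2p's `Multiaddr` and `PeerId` (the peer id values are placeholders):

```rust
// Hypothetical string-based stand-in for the check added to
// `addresses_to_publish`: a trailing `/p2p/<peer-id>` is always stripped
// (the local peer id is re-appended later), and the returned flag reports
// whether it matched; the real worker logs an error and `debug_assert!`s
// on a mismatch.
fn strip_p2p_suffix(address: &str, local_peer_id: &str) -> (String, bool) {
    match address.rsplit_once("/p2p/") {
        Some((base, peer_id)) => (base.to_string(), peer_id == local_peer_id),
        None => (address.to_string(), true),
    }
}

fn main() {
    let local = "12D3KooWExample";
    let (base, ok) = strip_p2p_suffix("/ip6/2001:db8::/tcp/30333/p2p/12D3KooWExample", local);
    assert_eq!(base, "/ip6/2001:db8::/tcp/30333");
    assert!(ok);
    let (_, ok) = strip_p2p_suffix("/ip4/1.2.3.4/tcp/30333/p2p/12D3KooWOther", local);
    assert!(!ok); // the worker would log an error and `debug_assert!` here
}
```
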
--- .../client/authority-discovery/src/worker.rs | 22 ++++++++++++++----- .../authority-discovery/src/worker/tests.rs | 14 +++++++----- 2 files changed, 25 insertions(+), 11 deletions(-) diff --git a/substrate/client/authority-discovery/src/worker.rs b/substrate/client/authority-discovery/src/worker.rs index 4ad7db5f7da..546f8cdbffd 100644 --- a/substrate/client/authority-discovery/src/worker.rs +++ b/substrate/client/authority-discovery/src/worker.rs @@ -342,6 +342,7 @@ where } fn addresses_to_publish(&self) -> impl Iterator<Item = Multiaddr> { + let local_peer_id = self.network.local_peer_id(); let publish_non_global_ips = self.publish_non_global_ips; let addresses = self .public_addresses .clone() .into_iter() .chain(self.network.external_addresses().into_iter().filter_map(|mut address| { // Make sure the reported external address does not contain `/p2p/...` protocol. - if let Some(multiaddr::Protocol::P2p(_)) = address.iter().last() { + if let Some(multiaddr::Protocol::P2p(peer_id)) = address.iter().last() { + if peer_id != *local_peer_id.as_ref() { + error!( + target: LOG_TARGET, + "Network returned external address '{address}' with peer id \ + not matching the local peer id '{local_peer_id}'.", + ); + debug_assert!(false); + } address.pop(); } @@ -375,15 +384,16 @@ where }) .collect::<Vec<_>>(); - let peer_id = self.network.local_peer_id(); debug!( target: LOG_TARGET, "Authority DHT record peer_id='{local_peer_id}' addresses='{addresses:?}'", ); - // The address must include the peer id. - let peer_id: Multihash = peer_id.into(); - addresses.into_iter().map(move |a| a.with(multiaddr::Protocol::P2p(peer_id))) + // The address must include the local peer id. + let local_peer_id: Multihash = local_peer_id.into(); + addresses + .into_iter() + .map(move |a| a.with(multiaddr::Protocol::P2p(local_peer_id))) } /// Publish own public addresses. diff --git a/substrate/client/authority-discovery/src/worker/tests.rs b/substrate/client/authority-discovery/src/worker/tests.rs index c2912088194..6c684d88e50 100644 --- a/substrate/client/authority-discovery/src/worker/tests.rs +++ b/substrate/client/authority-discovery/src/worker/tests.rs @@ -716,12 +716,16 @@ fn addresses_to_publish_adds_p2p() { #[test] fn addresses_to_publish_respects_existing_p2p_protocol() { let (_dht_event_tx, dht_event_rx) = channel(1000); + let identity = Keypair::generate_ed25519(); + let peer_id = identity.public().to_peer_id(); + let external_address = "/ip6/2001:db8::/tcp/30333" + .parse::<Multiaddr>() + .unwrap() + .with(multiaddr::Protocol::P2p(peer_id.into())); let network: Arc<TestNetwork> = Arc::new(TestNetwork { - external_addresses: vec![ - "/ip6/2001:db8::/tcp/30333/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC" - .parse() - .unwrap(), - ], + peer_id, + identity, + external_addresses: vec![external_address], ..Default::default() }); -- GitLab From 41257069b062ea7feb2277f11a2e992d3c9d5089 Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Sun, 31 Mar 2024 20:59:33 +1100 Subject: [PATCH 063/128] Tokens in FRAME Docs (#2802) Closes https://github.com/paritytech/polkadot-sdk-docs/issues/70 WIP PR for an overview of how to develop tokens in FRAME. 
- [x] Tokens in Substrate Ref Doc - High-level overview of the token-related logic in FRAME - Improve docs with better explanation of how holds, freezes, ed, free balance, etc, all work - [x] Update `pallet_balances` docs - Clearly mark what is deprecated (currency) - [x] Write fungible trait docs - [x] Evaluate and if required update `pallet_assets`, `pallet_uniques`, `pallet_nfts` docs - [x] Absorb https://github.com/paritytech/polkadot-sdk/pull/2683/ - [x] Audit individual trait method docs, and improve if possible Feel free to suggest additional TODOs for this PR in the comments --------- Co-authored-by: Bill Laboon Co-authored-by: Francisco Aguirre Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: Sebastian Kunert --- Cargo.lock | 3 + docs/sdk/Cargo.toml | 3 + docs/sdk/src/reference_docs/frame_currency.rs | 8 - .../reference_docs/frame_pallet_coupling.rs | 2 +- .../frame_runtime_upgrades_and_migrations.rs | 1 - docs/sdk/src/reference_docs/frame_tokens.rs | 131 +++++++++++++++++ docs/sdk/src/reference_docs/mod.rs | 6 +- substrate/frame/assets/src/lib.rs | 13 +- substrate/frame/balances/src/lib.rs | 83 +++++------ .../support/src/traits/tokens/currency.rs | 3 + .../src/traits/tokens/fungible/freeze.rs | 17 ++- .../src/traits/tokens/fungible/hold.rs | 7 +- .../src/traits/tokens/fungible/imbalance.rs | 2 + .../src/traits/tokens/fungible/item_of.rs | 5 + .../support/src/traits/tokens/fungible/mod.rs | 139 ++++++++++++++++-- .../src/traits/tokens/fungible/regular.rs | 2 + .../src/traits/tokens/fungible/union_of.rs | 2 + .../src/traits/tokens/fungibles/approvals.rs | 2 + .../src/traits/tokens/fungibles/enumerable.rs | 4 + .../src/traits/tokens/fungibles/freeze.rs | 2 + .../src/traits/tokens/fungibles/hold.rs | 2 + .../src/traits/tokens/fungibles/imbalance.rs | 2 + .../src/traits/tokens/fungibles/lifetime.rs | 2 + .../src/traits/tokens/fungibles/metadata.rs | 2 + .../src/traits/tokens/fungibles/mod.rs | 11 +- .../src/traits/tokens/fungibles/regular.rs | 2 + .../src/traits/tokens/fungibles/roles.rs | 2 + .../src/traits/tokens/fungibles/union_of.rs | 2 + 28 files changed, 377 insertions(+), 83 deletions(-) delete mode 100644 docs/sdk/src/reference_docs/frame_currency.rs create mode 100644 docs/sdk/src/reference_docs/frame_tokens.rs diff --git a/Cargo.lock b/Cargo.lock index 413bd28abe0..81eb682a27d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13390,11 +13390,14 @@ dependencies = [ "pallet-example-single-block-migrations", "pallet-examples", "pallet-multisig", + "pallet-nfts", + "pallet-preimage", "pallet-proxy", "pallet-referenda", "pallet-scheduler", "pallet-timestamp", "pallet-transaction-payment", + "pallet-uniques", "pallet-utility", "parity-scale-codec", "sc-cli", diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml index 434202ed693..3b8f45d7756 100644 --- a/docs/sdk/Cargo.toml +++ b/docs/sdk/Cargo.toml @@ -66,6 +66,7 @@ pallet-aura = { path = "../../substrate/frame/aura", default-features = false } pallet-timestamp = { path = "../../substrate/frame/timestamp" } pallet-balances = { path = "../../substrate/frame/balances" } pallet-assets = { path = "../../substrate/frame/assets" } +pallet-preimage = { path = "../../substrate/frame/preimage" } pallet-transaction-payment = { path = "../../substrate/frame/transaction-payment" } pallet-utility = { path = "../../substrate/frame/utility" } pallet-multisig = { path = "../../substrate/frame/multisig" } @@ -73,6 +74,8 @@ pallet-proxy = { path = "../../substrate/frame/proxy" } pallet-authorship = { path = 
"../../substrate/frame/authorship" } pallet-collective = { path = "../../substrate/frame/collective" } pallet-democracy = { path = "../../substrate/frame/democracy" } +pallet-uniques = { path = "../../substrate/frame/uniques" } +pallet-nfts = { path = "../../substrate/frame/nfts" } pallet-scheduler = { path = "../../substrate/frame/scheduler" } # Primitives diff --git a/docs/sdk/src/reference_docs/frame_currency.rs b/docs/sdk/src/reference_docs/frame_currency.rs deleted file mode 100644 index 6987d51aec8..00000000000 --- a/docs/sdk/src/reference_docs/frame_currency.rs +++ /dev/null @@ -1,8 +0,0 @@ -//! FRAME Currency Abstractions and Traits -//! -//! Notes: -//! -//! - History, `Currency` trait. -//! - `Hold` and `Freeze` with diagram. -//! - `HoldReason` and `FreezeReason` -//! - This footgun: diff --git a/docs/sdk/src/reference_docs/frame_pallet_coupling.rs b/docs/sdk/src/reference_docs/frame_pallet_coupling.rs index cca7f9feb3f..be464bbbf83 100644 --- a/docs/sdk/src/reference_docs/frame_pallet_coupling.rs +++ b/docs/sdk/src/reference_docs/frame_pallet_coupling.rs @@ -143,7 +143,7 @@ //! For example, all pallets in `polkadot-sdk` that needed to work with currencies could have been //! tightly coupled with [`pallet_balances`]. But, `polkadot-sdk` also provides [`pallet_assets`] //! (and more implementations by the community), therefore all pallets use traits to loosely couple -//! with balances or assets pallet. More on this in [`crate::reference_docs::frame_currency`]. +//! with balances or assets pallet. More on this in [`crate::reference_docs::frame_tokens`]. //! //! ## Further References //! diff --git a/docs/sdk/src/reference_docs/frame_runtime_upgrades_and_migrations.rs b/docs/sdk/src/reference_docs/frame_runtime_upgrades_and_migrations.rs index cbbf611f9dc..f9a69b892a3 100644 --- a/docs/sdk/src/reference_docs/frame_runtime_upgrades_and_migrations.rs +++ b/docs/sdk/src/reference_docs/frame_runtime_upgrades_and_migrations.rs @@ -131,7 +131,6 @@ //! //! TODO: Link to multi block migration example/s once PR is merged (). //! -//! [`GetStorageVersion`]: frame_support::traits::GetStorageVersion //! [`OnRuntimeUpgrade`]: frame_support::traits::OnRuntimeUpgrade //! [`StorageVersion`]: frame_support::traits::StorageVersion //! [`set_code`]: frame_system::Call::set_code diff --git a/docs/sdk/src/reference_docs/frame_tokens.rs b/docs/sdk/src/reference_docs/frame_tokens.rs new file mode 100644 index 00000000000..c9d34e2091d --- /dev/null +++ b/docs/sdk/src/reference_docs/frame_tokens.rs @@ -0,0 +1,131 @@ +// This file is part of polkadot-sdk. +// +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # FRAME Tokens +//! +//! This reference doc serves as a high-level overview of the token-related logic in FRAME, and +//! how to properly apply it to your use case. +//! +//! On completion of reading this doc, you should have a good understanding of: +//! 
- The distinction between token traits and trait implementations in FRAME, and why this +//! distinction is helpful +//! - Token-related traits available in FRAME +//! - Token-related trait implementations in FRAME +//! - How to choose the right trait or trait implementation for your use case +//! - Where to go next +//! +//! ## Getting Started +//! +//! The most ubiquitous way to add a token to a FRAME runtime is [`pallet_balances`]. Read +//! more about pallets [here](crate::polkadot_sdk::frame_runtime#pallets). +//! +//! You may then write custom pallets that interact with [`pallet_balances`]. The fastest way to +//! get started with that is by +//! [tightly coupling](crate::reference_docs::frame_pallet_coupling#tight-coupling-pallets) your +//! custom pallet to [`pallet_balances`]. +//! +//! However, to keep pallets flexible and modular, it is often preferred to +//! [loosely couple](crate::reference_docs::frame_pallet_coupling#loosely--coupling-pallets). +//! +//! To achieve loose coupling, +//! we separate token logic into traits and trait implementations. +//! +//! ## Traits and Trait Implementations +//! +//! Broadly speaking, token logic in FRAME can be divided into two categories: traits and +//! trait implementations. +//! +//! **Traits** define common interfaces that types of tokens should implement. For example, the +//! [`fungible::Inspect`](`frame_support::traits::fungible::Inspect`) trait specifies an interface +//! for *inspecting* token state such as the total issuance of the token, the balance of individual +//! accounts, etc. +//! +//! **Trait implementations** are concrete implementations of these traits. For example, one of the +//! many traits [`pallet_balances`] implements is +//! [`fungible::Inspect`](`frame_support::traits::fungible::Inspect`)*. It provides the concrete way +//! of inspecting the total issuance, balance of accounts, etc. There can be many implementations of +//! the same traits. +//! +//! The distinction between traits and trait implementations is helpful because it allows pallets +//! and other logic to be generic over their dependencies, avoiding tight coupling. +//! +//! To illustrate this with an example, let's consider [`pallet_preimage`]. This pallet takes a +//! deposit in exchange for storing a preimage for later use. A naive implementation of the +//! pallet may use [`pallet_balances`] in a tightly coupled manner, directly calling methods +//! on the pallet to reserve and unreserve deposits. This approach works well, +//! until someone has a use case requiring that an asset from a different pallet such as +//! [`pallet_assets`] is used for the deposit. Rather than tightly couple [`pallet_preimage`] to +//! [`pallet_balances`], [`pallet_assets`], and every other token-handling pallet a user +//! could possibly specify, [`pallet_preimage`] does not specify a concrete pallet as a dependency +//! but instead accepts any dependency which implements the +//! [`currency::ReservableCurrency`](`frame_support::traits::tokens::currency::ReservableCurrency`) +//! trait, namely via its [`Config::Currency`](`pallet_preimage::pallet::Config::Currency`) +//! associated type. This allows [`pallet_preimage`] to support any arbitrary pallet implementing +//! this trait, without needing any knowledge of what those pallets may be or requiring changes to +//! support new pallets which may be written in the future. +//! +//! Read more about coupling, and the benefits of loose coupling +//! [here](crate::reference_docs::frame_pallet_coupling). +//! +//! 
##### *Rust Advanced Tip +//! +//! The knowledge that [`pallet_balances`] implements +//! [`fungible::Inspect`](`frame_support::traits::fungible::Inspect`) is not some arcane knowledge +//! that you have to know by heart or memorize. One can simply look at the list of the implementors +//! of any trait in the Rust Doc to find all implementors (e.g. +//! ), +//! or use the `rust-analyzer` `Implementations` action. +//! +//! ## Fungible Token Traits in FRAME +//! +//! The [`fungible`](`frame_support::traits::fungible`) crate contains the latest set of FRAME +//! fungible token traits, and is recommended to use for all new logic requiring a fungible token. +//! See the crate documentation for more info about these fungible traits. +//! +//! [`fungibles`](`frame_support::traits::fungibles`) provides very similar functionality to +//! [`fungible`](`frame_support::traits::fungible`), except it supports managing multiple tokens. +//! +//! You may notice the trait [`Currency`](`frame_support::traits::Currency`) with similar +//! functionality is also used in the codebase, however this trait is deprecated and existing logic +//! is in the process of being migrated to [`fungible`](`frame_support::traits::fungible`) ([tracking issue](https://github.com/paritytech/polkadot-sdk/issues/226)). +//! +//! ## Fungible Token Trait Implementations in FRAME +//! +//! [`pallet_balances`] implements [`fungible`](`frame_support::traits::fungible`), and is the most +//! commonly used fungible implementation in FRAME. Most of the time, it's used for managing the +//! native token of the blockchain network it's used in. +//! +//! [`pallet_assets`] implements [`fungibles`](`frame_support::traits::fungibles`), and is another +//! popular fungible token implementation. It supports the creation and management of multiple +//! assets in a single crate, making it a good choice when a network requires more assets in +//! addition to its native token. +//! +//! ## Non-Fungible Tokens in FRAME +//! +//! [`pallet_nfts`] is recommended to use for all NFT use cases in FRAME. +//! See the crate documentation for more info about this pallet. +//! +//! [`pallet_uniques`] is deprecated and should not be used. +//! +//! +//! # What Next? +//! +//! - If you are interested in implementing a single fungible token, continue reading the +//! [`fungible`](`frame_support::traits::fungible`) and [`pallet_balances`] docs. +//! - If you are interested in implementing a set of fungible tokens, continue reading the +//! [`fungibles`](`frame_support::traits::fungibles`) trait and [`pallet_assets`] docs. +//! - If you are interested in implementing an NFT, continue reading the [`pallet_nfts`] docs. diff --git a/docs/sdk/src/reference_docs/mod.rs b/docs/sdk/src/reference_docs/mod.rs index a0d8d05b449..145df8844f2 100644 --- a/docs/sdk/src/reference_docs/mod.rs +++ b/docs/sdk/src/reference_docs/mod.rs @@ -65,9 +65,6 @@ pub mod metadata; /// Learn about how frame-system handles `account-ids`, nonces, consumers and providers. pub mod frame_system_accounts; -/// Learn about the currency-related abstractions provided in FRAME. -pub mod frame_currency; - /// Advice for configuring your development environment for Substrate development. pub mod development_environment_advice; @@ -75,6 +72,9 @@ pub mod development_environment_advice; // TODO: @shawntabrizi @ggwpez https://github.com/paritytech/polkadot-sdk-docs/issues/50 pub mod frame_benchmarking_weight; +/// Learn about the token-related logic in FRAME and how to apply it to your use case. 
+pub mod frame_tokens; + /// Learn about chain specification file and the genesis state of the blockchain. // TODO: @michalkucharczyk https://github.com/paritytech/polkadot-sdk-docs/issues/51 pub mod chain_spec_genesis; diff --git a/substrate/frame/assets/src/lib.rs b/substrate/frame/assets/src/lib.rs index c5468e4237d..e5fe2a3d1fd 100644 --- a/substrate/frame/assets/src/lib.rs +++ b/substrate/frame/assets/src/lib.rs @@ -17,7 +17,16 @@ //! # Assets Pallet //! -//! A simple, secure module for dealing with fungible assets. +//! A simple, secure module for dealing with sets of assets implementing +//! [`fungible`](frame_support::traits::fungible) traits, via +//! [`fungibles`](frame_support::traits::fungibles) traits. +//! +//! The pallet makes heavy use of concepts such as Holds and Freezes from the +//! [`frame_support::traits::fungible`] traits; therefore, you should read and understand those docs +//! as a prerequisite to understanding this pallet. +//! +//! See the [`frame_tokens`] reference docs for more information about the place of the +//! Assets pallet in FRAME. //! //! ## Overview //! @@ -133,6 +142,8 @@ //! //! * [`System`](../frame_system/index.html) //! * [`Support`](../frame_support/index.html) +//! +//! [`frame_tokens`]: ../polkadot_sdk_docs/reference_docs/frame_tokens/index.html // This recursion limit is needed because we have too many benchmarks and benchmarking will fail if // we add more without this limit. diff --git a/substrate/frame/balances/src/lib.rs b/substrate/frame/balances/src/lib.rs index 80278752207..685b12499ac 100644 --- a/substrate/frame/balances/src/lib.rs +++ b/substrate/frame/balances/src/lib.rs @@ -17,11 +17,15 @@ //! # Balances Pallet //! -//! The Balances pallet provides functionality for handling accounts and balances. +//! The Balances pallet provides functionality for handling accounts and balances for a single +//! token. //! -//! - [`Config`] -//! - [`Call`] -//! - [`Pallet`] +//! It makes heavy use of concepts such as Holds and Freezes from the +//! [`frame_support::traits::fungible`] traits; therefore, you should read and understand those docs +//! as a prerequisite to understanding this pallet. +//! +//! Also see the [`frame_tokens`] reference docs for higher level information regarding the +//! place of this pallet in FRAME. //! //! ## Overview //! @@ -38,42 +42,30 @@ //! //! ### Terminology //! -//! - **Existential Deposit:** The minimum balance required to create or keep an account open. This -//! prevents "dust accounts" from filling storage. When the free plus the reserved balance (i.e. -//! the total balance) fall below this, then the account is said to be dead; and it loses its -//! functionality as well as any prior history and all information on it is removed from the -//! chain's state. No account should ever have a total balance that is strictly between 0 and the -//! existential deposit (exclusive). If this ever happens, it indicates either a bug in this -//! pallet or an erroneous raw mutation of storage. -//! -//! - **Total Issuance:** The total number of units in existence in a system. -//! //! - **Reaping an account:** The act of removing an account by resetting its nonce. Happens after -//! its total balance has become zero (or, strictly speaking, less than the Existential Deposit). -//! -//! - **Free Balance:** The portion of a balance that is not reserved. The free balance is the only -//! balance that matters for most operations. +//! its total balance has become less than the Existential Deposit. //! 
- **Reserved Balance:** Reserved balance still belongs to the account holder, but is suspended. -//! Reserved balance can still be slashed, but only after all the free balance has been slashed. -//! -//! - **Imbalance:** A condition when some funds were credited or debited without equal and opposite -//! accounting (i.e. a difference between total issuance and account balances). Functions that -//! result in an imbalance will return an object of the `Imbalance` trait that can be managed within -//! your runtime logic. (If an imbalance is simply dropped, it should automatically maintain any -//! book-keeping such as total issuance.) +//! ### Implementations //! -//! - **Lock:** A freeze on a specified amount of an account's free balance until a specified block -//! number. Multiple locks always operate over the same funds, so they "overlay" rather than -//! "stack". +//! The Balances pallet provides implementations for the following [`fungible`] traits. If these +//! traits provide the functionality that you need, then you should avoid tight coupling with the +//! Balances pallet. //! -//! ### Implementations +//! - [`fungible::Inspect`] +//! - [`fungible::Mutate`] +//! - [`fungible::Unbalanced`] +//! - [`fungible::Balanced`] +//! - [`fungible::BalancedHold`] +//! - [`fungible::InspectHold`] +//! - [`fungible::MutateHold`] +//! - [`fungible::InspectFreeze`] +//! - [`fungible::MutateFreeze`] +//! - [`fungible::Imbalance`] //! -//! The Balances pallet provides implementations for the following traits. If these traits provide -//! the functionality that you need, then you can avoid coupling with the Balances pallet. +//! It also implements the following [`Currency`] related traits, however they are deprecated and +//! will eventually be removed. //! -//! - [`Currency`]: Functions for dealing with a -//! fungible assets system. +//! - [`Currency`]: Functions for dealing with a fungible assets system. //! - [`ReservableCurrency`] //! - [`NamedReservableCurrency`](frame_support::traits::NamedReservableCurrency): //! Functions for dealing with assets that can be reserved from an account. @@ -83,14 +75,6 @@ //! imbalances between total issuance in the system and account balances. Must be used when a //! function creates new funds (e.g. a reward) or destroys some funds (e.g. a system fee). //! -//! ## Interface -//! -//! ### Dispatchable Functions -//! -//! - `transfer_allow_death` - Transfer some liquid free balance to another account. -//! - `force_set_balance` - Set the balances of a given account. The origin of this call must be -//! root. -//! //! ## Usage //! //! The following examples show how to use the Balances pallet in your custom pallet. @@ -151,8 +135,11 @@ //! * Total issued balanced of all accounts should be less than `Config::Balance::max_value()`. //! * Existential Deposit is set to a value greater than zero. //! -//! Note, you may find the Balances pallet still functions with an ED of zero in some circumstances, -//! however this is not a configuration which is generally supported, nor will it be. +//! Note, you may find the Balances pallet still functions with an ED of zero when the +//! `insecure_zero_ed` cargo feature is enabled. However this is not a configuration which is +//! generally supported, nor will it be. +//! +//! [`frame_tokens`]: ../polkadot_sdk_docs/reference_docs/frame_tokens/index.html #![cfg_attr(not(feature = "std"), no_std)] mod benchmarking; @@ -308,10 +295,14 @@ pub mod pallet { /// The maximum number of locks that should exist on an account. 
/// Not strictly enforced, but used for weight estimation. + /// + /// Use of locks is deprecated in favour of freezes. See `https://github.com/paritytech/substrate/pull/12951/` #[pallet::constant] type MaxLocks: Get<u32>; /// The maximum number of named reserves that can exist on an account. + /// + /// Use of reserves is deprecated in favour of holds. See `https://github.com/paritytech/substrate/pull/12951/` #[pallet::constant] type MaxReserves: Get<u32>; @@ -455,6 +446,8 @@ pub mod pallet { /// Any liquidity locks on some account balances. /// NOTE: Should only be accessed when setting, changing and freeing a lock. + /// + /// Use of locks is deprecated in favour of freezes. See `https://github.com/paritytech/substrate/pull/12951/` #[pallet::storage] #[pallet::getter(fn locks)] pub type Locks<T: Config<I>, I: 'static = ()> = StorageMap< @@ -466,6 +459,8 @@ pub mod pallet { >; /// Named reserves on some account balances. + /// + /// Use of reserves is deprecated in favour of holds. See `https://github.com/paritytech/substrate/pull/12951/` #[pallet::storage] #[pallet::getter(fn reserves)] pub type Reserves<T: Config<I>, I: 'static = ()> = StorageMap< diff --git a/substrate/frame/support/src/traits/tokens/currency.rs b/substrate/frame/support/src/traits/tokens/currency.rs index 282e7f64473..b3db4c98001 100644 --- a/substrate/frame/support/src/traits/tokens/currency.rs +++ b/substrate/frame/support/src/traits/tokens/currency.rs @@ -16,6 +16,9 @@ // limitations under the License. //! The Currency trait and associated types. +//! +//! Note: Currency and related traits are deprecated; instead, +//! [`fungible`](frame_support::traits::fungible) traits should be used. use super::{ imbalance::{Imbalance, SignedImbalance}, diff --git a/substrate/frame/support/src/traits/tokens/fungible/freeze.rs b/substrate/frame/support/src/traits/tokens/fungible/freeze.rs index 8b542ee4c60..96efbc6ab89 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/freeze.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/freeze.rs @@ -16,6 +16,9 @@ // limitations under the License. //! The traits for putting freezes within a single fungible token class. +//! +//! See the [`crate::traits::fungible`] doc for more information about fungible traits, +//! including the place of freezes in FRAME. use scale_info::TypeInfo; use sp_arithmetic::{ @@ -35,7 +38,7 @@ pub trait Inspect<AccountId>: super::Inspect<AccountId> { /// An identifier for a freeze. type Id: codec::Encode + TypeInfo + 'static; - /// Amount of funds held in reserve by `who` for the given `id`. + /// Amount of funds frozen in reserve by `who` for the given `id`. fn balance_frozen(id: &Self::Id, who: &AccountId) -> Self::Balance; /// The amount of the balance which can become frozen. Defaults to `total_balance()`. @@ -45,11 +48,11 @@ pub trait Inspect<AccountId>: super::Inspect<AccountId> { /// Returns `true` if it's possible to introduce a freeze for the given `id` onto the /// account of `who`. This will be true as long as the implementor supports as many - /// concurrent freeze locks as there are possible values of `id`. + /// concurrent freezes as there are possible values of `id`. fn can_freeze(id: &Self::Id, who: &AccountId) -> bool; } -/// Trait for introducing, altering and removing locks to freeze an account's funds so they never +/// Trait for introducing, altering and removing freezes for an account so that its funds never /// go below a set minimum. 
pub trait Mutate<AccountId>: Inspect<AccountId> { /// Prevent actions which would reduce the balance of the account of `who` below the given @@ -66,16 +69,16 @@ pub trait Mutate<AccountId>: Inspect<AccountId> { /// counteract any pre-existing freezes in place for `who` under the `id`. Also unlike /// `set_freeze`, in the case that `amount` is zero, this is no-op and never fails. /// - /// Note that more funds can be locked than the total balance, if desired. + /// Note that more funds can be frozen than the total balance, if desired. fn extend_freeze(id: &Self::Id, who: &AccountId, amount: Self::Balance) -> DispatchResult; - /// Remove an existing lock. + /// Remove an existing freeze. fn thaw(id: &Self::Id, who: &AccountId) -> DispatchResult; /// Attempt to alter the amount frozen under the given `id` to `amount`. /// /// Fail if the account of `who` has fewer freezable funds than `amount`, unless `fortitude` is - /// `Fortitude::Force`. + /// [`Fortitude::Force`]. fn set_frozen( id: &Self::Id, who: &AccountId, @@ -91,7 +94,7 @@ pub trait Mutate<AccountId>: Inspect<AccountId> { /// the amount frozen under `id`. Do nothing otherwise. /// /// Fail if the account of `who` has fewer freezable funds than `amount`, unless `fortitude` is - /// `Fortitude::Force`. + /// [`Fortitude::Force`]. fn ensure_frozen( id: &Self::Id, who: &AccountId, diff --git a/substrate/frame/support/src/traits/tokens/fungible/hold.rs b/substrate/frame/support/src/traits/tokens/fungible/hold.rs index 6da652d2998..28ece25c91d 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/hold.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/hold.rs @@ -16,6 +16,9 @@ // limitations under the License. //! The traits for putting holds within a single fungible token class. +//! +//! See the [`crate::traits::fungible`] doc for more information about fungible traits, +//! including the place of holds in FRAME. use crate::{ ensure, @@ -214,8 +217,8 @@ pub trait Mutate<AccountId>: /// /// The actual amount released is returned with `Ok`. /// - /// If `precision` is `BestEffort`, then the amount actually unreserved and returned as the - /// inner value of `Ok` may be smaller than the `amount` passed. + /// If `precision` is [`Precision::BestEffort`], then the amount actually unreserved and + /// returned as the inner value of `Ok` may be smaller than the `amount` passed. /// /// NOTE! The inner of the `Ok` result variant returns the *actual* amount released. This is the /// opposite of the `ReservableCurrency::unreserve()` result, which gives the amount not able diff --git a/substrate/frame/support/src/traits/tokens/fungible/imbalance.rs b/substrate/frame/support/src/traits/tokens/fungible/imbalance.rs index 0e251021970..020dffe28c8 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/imbalance.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/imbalance.rs @@ -17,6 +17,8 @@ //! The imbalance type and its associates, which handles keeps everything adding up properly with //! unbalanced operations. +//! +//! See the [`crate::traits::fungible`] doc for more information about fungible traits. use super::{super::Imbalance as ImbalanceT, Balanced, *}; use crate::traits::{ diff --git a/substrate/frame/support/src/traits/tokens/fungible/item_of.rs b/substrate/frame/support/src/traits/tokens/fungible/item_of.rs index 37749d39600..5374cc52bab 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/item_of.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/item_of.rs @@ -16,6 +16,11 @@ // limitations under the License. //! 
Adapter to use `fungibles::*` implementations as `fungible::*`. +//! +//! This allows for a `fungibles` asset, e.g. from the `pallet_assets` pallet, to be used when a +//! `fungible` asset is expected. +//! +//! See the [`crate::traits::fungible`] doc for more information about fungible traits. use super::*; use crate::traits::{ diff --git a/substrate/frame/support/src/traits/tokens/fungible/mod.rs b/substrate/frame/support/src/traits/tokens/fungible/mod.rs index ba4a2e5e21a..4a0cda2dbc7 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/mod.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/mod.rs @@ -17,26 +17,135 @@ //! The traits for dealing with a single fungible token class and any associated types. //! -//! ### User-implememted traits -//! - `Inspect`: Regular balance inspector functions. -//! - `Unbalanced`: Low-level balance mutating functions. Does not guarantee proper book-keeping and -//! so should not be called into directly from application code. Other traits depend on this and -//! provide default implementations based on it. -//! - `UnbalancedHold`: Low-level balance mutating functions for balances placed on hold. Does not +//! Also see the [`frame_tokens`] reference docs for more information about the place of +//! `fungible` traits in Substrate. +//! +//! # Available Traits +//! - [`Inspect`]: Regular balance inspector functions. +//! - [`Unbalanced`]: Low-level balance mutating functions. Does not guarantee proper book-keeping +//! and so should not be called into directly from application code. Other traits depend on this +//! and provide default implementations based on it. +//! - [`UnbalancedHold`]: Low-level balance mutating functions for balances placed on hold. Does not //! guarantee proper book-keeping and so should not be called into directly from application code. //! Other traits depend on this and provide default implementations based on it. -//! - `Mutate`: Regular balance mutator functions. Pre-implemented using `Unbalanced`, though the -//! `done_*` functions should likely be reimplemented in case you want to do something following -//! the operation such as emit events. -//! - `InspectHold`: Inspector functions for balances on hold. -//! - `MutateHold`: Mutator functions for balances on hold. Mostly pre-implemented using -//! `UnbalancedHold`. -//! - `InspectFreeze`: Inspector functions for frozen balance. -//! - `MutateFreeze`: Mutator functions for frozen balance. -//! - `Balanced`: One-sided mutator functions for regular balances, which return imbalance objects +//! - [`Mutate`]: Regular balance mutator functions. Pre-implemented using [`Unbalanced`], though +//! the `done_*` functions should likely be reimplemented in case you want to do something +//! following the operation such as emit events. +//! - [`InspectHold`]: Inspector functions for balances on hold. +//! - [`MutateHold`]: Mutator functions for balances on hold. Mostly pre-implemented using +//! [`UnbalancedHold`]. +//! - [`InspectFreeze`]: Inspector functions for frozen balance. +//! - [`MutateFreeze`]: Mutator functions for frozen balance. +//! - [`Balanced`]: One-sided mutator functions for regular balances, which return imbalance objects //! which guarantee eventual book-keeping. May be useful for some sophisticated operations where //! funds must be removed from an account before it is known precisely what should be done with //! them. +//! +//! ## Terminology +//! +//! - **Total Issuance**: The total number of units in existence in a system. +//! +//! 
- **Total Balance**: The sum of an account's free and held balances. +//! +//! - **Free Balance**: A portion of an account's total balance that is not held. Note this is +//! distinct from the Spendable Balance, which represents how much Balance the user can actually +//! transfer. +//! +//! - **Held Balance**: Held balance still belongs to the account holder, but is suspended. It can +//! be slashed, but only after all the free balance has been slashed. +//! +//! Multiple holds stack rather than overlay. This means that if an account has +//! 3 holds for 100 units, the account can spend its funds for any reason down to 300 units, at +//! which point the holds will start to come into play. +//! +//! - **Frozen Balance**: A freeze on a specified amount of an account's free balance until a +//! specified block number. +//! +//! Multiple freezes always operate over the same funds, so they "overlay" rather than +//! "stack". This means that if an account has 3 freezes for 100 units, the account can spend its +//! funds for any reason down to 100 units, at which point the freezes will start to come into +//! play. +//! +//! - **Minimum Balance (a.k.a. Existential Deposit, a.k.a. ED)**: The minimum balance required to +//! create or keep an account open. This is to prevent "dust accounts" from filling storage. When +//! the free plus the held balance (i.e. the total balance) falls below this, then the account is +//! said to be dead. It loses its functionality as well as any prior history and all information +//! on it is removed from the chain's state. No account should ever have a total balance that is +//! strictly between 0 and the existential deposit (exclusive). If this ever happens, it indicates +//! either a bug in the implementation of this trait or an erroneous raw mutation of storage. +//! +//! - **Untouchable Balance**: The part of a user's free balance they cannot spend, due to ED or +//! Freeze(s). +//! +//! - **Spendable Balance**: The part of a user's free balance they can actually transfer, after +//! accounting for Holds and Freezes. +//! +//! - **Imbalance**: A condition when some funds were credited or debited without equal and opposite +//! accounting (i.e. a difference between total issuance and account balances). Functions that +//! result in an imbalance will return an object of the [`imbalance::Credit`] or +//! [`imbalance::Debt`] traits that can be managed within your runtime logic. +//! +//! If an imbalance is simply dropped, it should automatically maintain any book-keeping such as +//! total issuance. +//! +//! ## Visualising Balance Components Together 💫 +//! +//! ```ignore +//! |__total__________________________________| +//! |__on_hold__|_____________free____________| +//! |__________frozen___________| +//! |__on_hold__|__ed__| +//! |__untouchable__|__spendable__| +//! ``` +//! +//! ## Holds and Freezes +//! +//! Both holds and freezes are used to prevent an account from using some of its balance. +//! +//! The primary distinction between the two are that: +//! - Holds are cumulative (do not overlap) and are distinct from the free balance +//! - Freezes are not cumulative, and can overlap with each other or with holds +//! +//! ```ignore +//! |__total_____________________________| +//! |__hold_a__|__hold_b__|_____free_____| +//! |__on_hold____________| // <- the sum of all holds +//! |__freeze_a_______________| +//! |__freeze_b____| +//! |__freeze_c________| +//! |__frozen_________________| // <- the max of all freezes +//! ``` +//! +//! 
Holds are designed to be infallibly slashed, meaning that any logic using a `Freeze` +//! must handle the possibility of the frozen amount being reduced, potentially to zero. A +//! permissionless function should be provided in order to allow bookkeeping to be updated in this +//! instance. E.g. some balance is frozen when it is used for voting; one could use held balance for +//! voting, but nothing prevents this frozen balance from being reduced if the overlapping hold is +//! slashed. +//! +//! Every Hold and Freeze is accompanied by a unique `Reason`, making it clear for each instance +//! what the originating pallet and purpose is. These reasons are amalgamated into a single enum +//! `RuntimeHoldReason` and `RuntimeFreezeReason` respectively, when the runtime is compiled. +//! +//! Note that `Hold` and `Freeze` reasons should remain in your runtime for as long as storage +//! could exist in your runtime with those reasons, otherwise your runtime state could become +//! undecodable. +//! +//! ### Should I use a Hold or Freeze? +//! +//! If you require a balance to be infallibly slashed, then you should use Holds. +//! +//! If you require setting a minimum account balance, then you should use Freezes. Note +//! Freezes do not carry the same guarantees as Holds. Although the account cannot voluntarily +//! reduce their balance below the largest freeze, if Holds on the account are slashed then the +//! balance could drop below the freeze amount. +//! +//! ## Sets of Tokens +//! +//! For managing sets of tokens, see the [`fungibles`](`frame_support::traits::fungibles`) trait +//! which is a wrapper around this trait but supporting multiple asset instances. +//! +//! [`frame_tokens`]: ../../../../polkadot_sdk_docs/reference_docs/frame_tokens/index.html pub mod conformance_tests; pub mod freeze; diff --git a/substrate/frame/support/src/traits/tokens/fungible/regular.rs b/substrate/frame/support/src/traits/tokens/fungible/regular.rs index 0157b08bd13..4ed31dcf9fb 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/regular.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/regular.rs @@ -16,6 +16,8 @@ // limitations under the License. //! `Inspect` and `Mutate` traits for working with regular balances. +//! +//! See the [`crate::traits::fungible`] doc for more information about fungible traits. use crate::{ ensure, diff --git a/substrate/frame/support/src/traits/tokens/fungible/union_of.rs b/substrate/frame/support/src/traits/tokens/fungible/union_of.rs index 33711d7a16c..575b771a614 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/union_of.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/union_of.rs @@ -17,6 +17,8 @@ //! Types to combine some `fungible::*` and `fungibles::*` implementations into one union //! `fungibles::*` implementation. +//! +//! See the [`crate::traits::fungible`] doc for more information about fungible traits. use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::traits::{ diff --git a/substrate/frame/support/src/traits/tokens/fungibles/approvals.rs b/substrate/frame/support/src/traits/tokens/fungibles/approvals.rs index 7a80279b019..09e3d20756a 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/approvals.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/approvals.rs @@ -16,6 +16,8 @@ // limitations under the License. //! Inspect and Mutate traits for Asset approvals +//! +//! See the [`crate::traits::fungibles`] doc for more information about fungibles traits. 
use crate::dispatch::DispatchResult; pub trait Inspect<AccountId>: super::Inspect<AccountId> { diff --git a/substrate/frame/support/src/traits/tokens/fungibles/enumerable.rs b/substrate/frame/support/src/traits/tokens/fungibles/enumerable.rs index 08bb784a7db..81dbb93b0b8 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/enumerable.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/enumerable.rs @@ -15,6 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. +//! Contains an interface for enumerating assets in existence or owned by a given account. +//! +//! See the [`crate::traits::fungibles`] doc for more information about fungibles traits. + /// Interface for enumerating assets in existence or owned by a given account. pub trait Inspect<AccountId>: super::Inspect<AccountId> { type AssetsIterator; diff --git a/substrate/frame/support/src/traits/tokens/fungibles/freeze.rs b/substrate/frame/support/src/traits/tokens/fungibles/freeze.rs index b07d20d6c41..244f7005899 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/freeze.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/freeze.rs @@ -16,6 +16,8 @@ // limitations under the License. //! The traits for putting freezes within a single fungible token class. +//! +//! See the [`crate::traits::fungibles`] doc for more information about fungibles traits. use crate::{ensure, traits::tokens::Fortitude}; use scale_info::TypeInfo; diff --git a/substrate/frame/support/src/traits/tokens/fungibles/hold.rs b/substrate/frame/support/src/traits/tokens/fungibles/hold.rs index 1efd1594213..ef3fef7a300 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/hold.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/hold.rs @@ -16,6 +16,8 @@ // limitations under the License. //! The traits for putting holds within a single fungible token class. +//! +//! See the [`crate::traits::fungibles`] doc for more information about fungibles traits. use crate::{ ensure, diff --git a/substrate/frame/support/src/traits/tokens/fungibles/imbalance.rs b/substrate/frame/support/src/traits/tokens/fungibles/imbalance.rs index 54c1e900b6e..bb0d83721a4 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/imbalance.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/imbalance.rs @@ -17,6 +17,8 @@ //! The imbalance type and its associates, which handles keeps everything adding up properly with //! unbalanced operations. +//! +//! See the [`crate::traits::fungibles`] doc for more information about fungibles traits. use super::*; use crate::traits::{ diff --git a/substrate/frame/support/src/traits/tokens/fungibles/lifetime.rs b/substrate/frame/support/src/traits/tokens/fungibles/lifetime.rs index 0e195a52318..49f6c846ccd 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/lifetime.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/lifetime.rs @@ -16,6 +16,8 @@ // limitations under the License. //! Traits for creating and destroying assets. +//! +//! See the [`crate::traits::fungibles`] doc for more information about fungibles traits. use sp_runtime::{DispatchError, DispatchResult}; diff --git a/substrate/frame/support/src/traits/tokens/fungibles/metadata.rs b/substrate/frame/support/src/traits/tokens/fungibles/metadata.rs index ab310119e58..ab722426dad 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/metadata.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/metadata.rs @@ -16,6 +16,8 @@ // limitations under the License. //! 
Inspect and Mutate traits for Asset metadata +//! +//! See the [`crate::traits::fungibles`] doc for more information about fungibles traits. use crate::dispatch::DispatchResult; use sp_std::vec::Vec; diff --git a/substrate/frame/support/src/traits/tokens/fungibles/mod.rs b/substrate/frame/support/src/traits/tokens/fungibles/mod.rs index 1db0706ba4f..2122fdeaed3 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/mod.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/mod.rs @@ -15,7 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! The traits for sets of fungible tokens and any associated types. +//! The traits for *sets* of [`fungible`](`frame_support::traits::fungible`) tokens and any +//! associated types. +//! +//! Individual tokens in the `fungibles` set may be used when a `fungible` trait is expected using +//! [`crate::traits::tokens::fungible::ItemOf`]. +//! +//! Also see the [`frame_tokens`] reference docs for more information about the place of +//! `fungible` traits in Substrate. +//! +//! [`frame_tokens`]: ../../../../polkadot_sdk_docs/reference_docs/frame_tokens/index.html pub mod approvals; mod enumerable; diff --git a/substrate/frame/support/src/traits/tokens/fungibles/regular.rs b/substrate/frame/support/src/traits/tokens/fungibles/regular.rs index 8cc97802da6..b30e0ae3a2a 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/regular.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/regular.rs @@ -16,6 +16,8 @@ // limitations under the License. //! `Inspect` and `Mutate` traits for working with regular balances. +//! +//! See the [`crate::traits::fungibles`] doc for more information about fungibles traits. use sp_std::marker::PhantomData; diff --git a/substrate/frame/support/src/traits/tokens/fungibles/roles.rs b/substrate/frame/support/src/traits/tokens/fungibles/roles.rs index 5cd1228afbc..4f95ad8368c 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/roles.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/roles.rs @@ -16,6 +16,8 @@ // limitations under the License. //! Inspect traits for Asset roles +//! +//! See the [`crate::traits::fungibles`] doc for more information about fungibles traits. pub trait Inspect: super::Inspect { // Get owner for an AssetId. diff --git a/substrate/frame/support/src/traits/tokens/fungibles/union_of.rs b/substrate/frame/support/src/traits/tokens/fungibles/union_of.rs index 9d2a783df2a..c8a1ec37e78 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/union_of.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/union_of.rs @@ -16,6 +16,8 @@ // limitations under the License. //! Type to combine two `fungibles::*` implementations into one union `fungibles::*` implementation. +//! +//! See the [`crate::traits::fungibles`] doc for more information about fungibles traits. 
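The `ItemOf` adapter mentioned in the `fungibles` module docs above can be applied along these lines — an illustrative sketch only, assuming `Assets` is some `fungibles` implementation (e.g. an instance of `pallet-assets`) with `u32` asset ids and `AccountId` accounts; the asset id `1984` is made up:

```rust
use frame_support::{parameter_types, traits::tokens::fungible::ItemOf};

parameter_types! {
	// Hypothetical id of one asset inside the `Assets` set.
	pub const ExampleAssetId: u32 = 1984;
}

// Presents that single asset of the `fungibles` implementation `Assets` as a
// stand-alone `fungible` token, usable wherever `fungible::*` bounds are expected.
pub type ExampleToken = ItemOf<Assets, ExampleAssetId, AccountId>;
```

Any pallet that is generic over a `fungible::Inspect`/`Mutate` implementation can then be instantiated with `ExampleToken` without knowing about the larger set.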
use frame_support::traits::{
	tokens::{
-- 
GitLab

From 256d5aefdc83928090aa2e3f8c022484fab38e0a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bastian=20K=C3=B6cher?=
Date: Sun, 31 Mar 2024 22:47:01 +0200
Subject: [PATCH 064/128] Revert log level changes (#3913)

Closes: https://github.com/paritytech/polkadot-sdk/issues/3906

---
 polkadot/node/network/gossip-support/src/lib.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/polkadot/node/network/gossip-support/src/lib.rs b/polkadot/node/network/gossip-support/src/lib.rs
index 9f33cd5d8a3..4dfdd1f7208 100644
--- a/polkadot/node/network/gossip-support/src/lib.rs
+++ b/polkadot/node/network/gossip-support/src/lib.rs
@@ -508,7 +508,7 @@ where
 			);
 		}
 		let pretty = PrettyAuthorities(unconnected_authorities);
-		gum::info!(
+		gum::debug!(
 			target: LOG_TARGET,
 			?connected_ratio,
 			?absolute_connected,
-- 
GitLab

From aa44384e05e05705cbdfacd8d73972404be4be6f Mon Sep 17 00:00:00 2001
From: gemini132 <164285545+gemini132@users.noreply.github.com>
Date: Mon, 1 Apr 2024 06:28:38 +0800
Subject: [PATCH 065/128] Fix two typos (#3812)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-authored-by: Bastian Köcher
-- 
GitLab

From a2c9ab8c043221f4902739d678739b1fa9319cef Mon Sep 17 00:00:00 2001
From: Matteo Muraca <56828990+muraca@users.noreply.github.com>
Date: Mon, 1 Apr 2024 07:17:20 +0200
Subject: [PATCH 066/128] Removed `pallet::getter` usage from `pallet-alliance` (#3738)

Part of #3326
cc @kianenigma @ggwpez @liamaharon

polkadot address: 12poSUQPtcF1HUPQGY3zZu2P8emuW9YnsPduA4XG3oCEfJVp

---------

Signed-off-by: Matteo Muraca
Co-authored-by: Liam Aharon
Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com>
---
 prdoc/pr_3738.prdoc                          | 14 +++++
 substrate/frame/alliance/src/benchmarking.rs | 10 ++--
 substrate/frame/alliance/src/lib.rs          |  7 ---
 substrate/frame/alliance/src/migration.rs    | 12 ++---
 substrate/frame/alliance/src/tests.rs        | 55 +++++++++++---------
 5 files changed, 54 insertions(+), 44 deletions(-)
 create mode 100644 prdoc/pr_3738.prdoc

diff --git a/prdoc/pr_3738.prdoc b/prdoc/pr_3738.prdoc
new file mode 100644
index 00000000000..cbf19b95c36
--- /dev/null
+++ b/prdoc/pr_3738.prdoc
@@ -0,0 +1,14 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Removed `pallet::getter` usage from `pallet-alliance`
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      This PR removes `pallet::getter` usage from `pallet-alliance`, and updates dependent code accordingly.
+      The syntax `StorageItem::<T, I>::get()` should be used instead.
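In practice the change at call sites is mechanical; an illustrative before/after sketch using the alliance pallet's `Rule` storage value (with the pallet's `T, I` generics):

```rust
// Before: implicit getter generated by `#[pallet::getter(fn rule)]`.
let rule = Alliance::rule();

// After: read the storage item directly.
let rule = Rule::<T, I>::get();
```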
+ +crates: + - name: pallet-alliance + bump: major diff --git a/substrate/frame/alliance/src/benchmarking.rs b/substrate/frame/alliance/src/benchmarking.rs index 4ccd0fc08f6..710c32a848d 100644 --- a/substrate/frame/alliance/src/benchmarking.rs +++ b/substrate/frame/alliance/src/benchmarking.rs @@ -505,8 +505,8 @@ mod benchmarks { assert_last_event::( Event::MembersInitialized { fellows: fellows.clone(), allies: allies.clone() }.into(), ); - assert_eq!(Alliance::::members(MemberRole::Fellow), fellows); - assert_eq!(Alliance::::members(MemberRole::Ally), allies); + assert_eq!(Members::::get(MemberRole::Fellow), fellows); + assert_eq!(Members::::get(MemberRole::Ally), allies); Ok(()) } @@ -563,7 +563,7 @@ mod benchmarks { { call.dispatch_bypass_filter(origin)?; } - assert_eq!(Alliance::::rule(), Some(rule.clone())); + assert_eq!(Rule::::get(), Some(rule.clone())); assert_last_event::(Event::NewRuleSet { rule }.into()); Ok(()) } @@ -583,7 +583,7 @@ mod benchmarks { call.dispatch_bypass_filter(origin)?; } - assert!(Alliance::::announcements().contains(&announcement)); + assert!(Announcements::::get().contains(&announcement)); assert_last_event::(Event::Announced { announcement }.into()); Ok(()) } @@ -606,7 +606,7 @@ mod benchmarks { call.dispatch_bypass_filter(origin)?; } - assert!(!Alliance::::announcements().contains(&announcement)); + assert!(!Announcements::::get().contains(&announcement)); assert_last_event::(Event::AnnouncementRemoved { announcement }.into()); Ok(()) } diff --git a/substrate/frame/alliance/src/lib.rs b/substrate/frame/alliance/src/lib.rs index 414d550c53a..1f06241e9c8 100644 --- a/substrate/frame/alliance/src/lib.rs +++ b/substrate/frame/alliance/src/lib.rs @@ -442,24 +442,20 @@ pub mod pallet { /// The IPFS CID of the alliance rule. /// Fellows can propose a new rule with a super-majority. #[pallet::storage] - #[pallet::getter(fn rule)] pub type Rule, I: 'static = ()> = StorageValue<_, Cid, OptionQuery>; /// The current IPFS CIDs of any announcements. #[pallet::storage] - #[pallet::getter(fn announcements)] pub type Announcements, I: 'static = ()> = StorageValue<_, BoundedVec, ValueQuery>; /// Maps members to their candidacy deposit. #[pallet::storage] - #[pallet::getter(fn deposit_of)] pub type DepositOf, I: 'static = ()> = StorageMap<_, Blake2_128Concat, T::AccountId, BalanceOf, OptionQuery>; /// Maps member type to members of each type. #[pallet::storage] - #[pallet::getter(fn members)] pub type Members, I: 'static = ()> = StorageMap< _, Twox64Concat, @@ -471,20 +467,17 @@ pub mod pallet { /// A set of members who gave a retirement notice. They can retire after the end of retirement /// period stored as a future block number. #[pallet::storage] - #[pallet::getter(fn retiring_members)] pub type RetiringMembers, I: 'static = ()> = StorageMap<_, Blake2_128Concat, T::AccountId, BlockNumberFor, OptionQuery>; /// The current list of accounts deemed unscrupulous. These accounts non grata cannot submit /// candidacy. #[pallet::storage] - #[pallet::getter(fn unscrupulous_accounts)] pub type UnscrupulousAccounts, I: 'static = ()> = StorageValue<_, BoundedVec, ValueQuery>; /// The current list of websites deemed unscrupulous. 
#[pallet::storage] - #[pallet::getter(fn unscrupulous_websites)] pub type UnscrupulousWebsites, I: 'static = ()> = StorageValue<_, BoundedVec, T::MaxUnscrupulousItems>, ValueQuery>; diff --git a/substrate/frame/alliance/src/migration.rs b/substrate/frame/alliance/src/migration.rs index 432f09a16f4..b4ecc181944 100644 --- a/substrate/frame/alliance/src/migration.rs +++ b/substrate/frame/alliance/src/migration.rs @@ -162,18 +162,18 @@ pub(crate) mod v1_to_v2 { #[cfg(test)] mod test { use super::*; - use crate::{mock::*, MemberRole}; + use crate::{mock::*, MemberRole, Members}; #[test] fn migration_v1_to_v2_works() { new_test_ext().execute_with(|| { assert_ok!(Alliance::join_alliance(RuntimeOrigin::signed(4))); - assert_eq!(Alliance::members(MemberRole::Ally), vec![4]); - assert_eq!(Alliance::members(MemberRole::Fellow), vec![1, 2, 3]); + assert_eq!(Members::::get(MemberRole::Ally), vec![4]); + assert_eq!(Members::::get(MemberRole::Fellow), vec![1, 2, 3]); v1_to_v2::migrate::(); - assert_eq!(Alliance::members(MemberRole::Fellow), vec![1, 2, 3, 4]); - assert_eq!(Alliance::members(MemberRole::Ally), vec![]); - assert_eq!(Alliance::members(MemberRole::Retiring), vec![]); + assert_eq!(Members::::get(MemberRole::Fellow), vec![1, 2, 3, 4]); + assert_eq!(Members::::get(MemberRole::Ally), vec![]); + assert_eq!(Members::::get(MemberRole::Retiring), vec![]); }); } } diff --git a/substrate/frame/alliance/src/tests.rs b/substrate/frame/alliance/src/tests.rs index c65f10228e7..edb515b8115 100644 --- a/substrate/frame/alliance/src/tests.rs +++ b/substrate/frame/alliance/src/tests.rs @@ -21,7 +21,7 @@ use frame_support::{assert_noop, assert_ok, error::BadOrigin}; use frame_system::{EventRecord, Phase}; use super::*; -use crate::mock::*; +use crate::{self as alliance, mock::*}; type AllianceMotionEvent = pallet_collective::Event; @@ -118,7 +118,7 @@ fn disband_works() { // join alliance and reserve funds assert_eq!(Balances::free_balance(9), 1000 - id_deposit); assert_ok!(Alliance::join_alliance(RuntimeOrigin::signed(9))); - assert_eq!(Alliance::deposit_of(9), Some(expected_join_deposit)); + assert_eq!(alliance::DepositOf::::get(9), Some(expected_join_deposit)); assert_eq!(Balances::free_balance(9), 1000 - id_deposit - expected_join_deposit); assert!(Alliance::is_member_of(&9, MemberRole::Ally)); @@ -314,7 +314,7 @@ fn set_rule_works() { new_test_ext().execute_with(|| { let cid = test_cid(); assert_ok!(Alliance::set_rule(RuntimeOrigin::signed(1), cid.clone())); - assert_eq!(Alliance::rule(), Some(cid.clone())); + assert_eq!(alliance::Rule::::get(), Some(cid.clone())); System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::NewRuleSet { rule: cid, @@ -330,7 +330,7 @@ fn announce_works() { assert_noop!(Alliance::announce(RuntimeOrigin::signed(2), cid.clone()), BadOrigin); assert_ok!(Alliance::announce(RuntimeOrigin::signed(3), cid.clone())); - assert_eq!(Alliance::announcements(), vec![cid.clone()]); + assert_eq!(alliance::Announcements::::get(), vec![cid.clone()]); System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::Announced { announcement: cid, @@ -343,7 +343,7 @@ fn remove_announcement_works() { new_test_ext().execute_with(|| { let cid = test_cid(); assert_ok!(Alliance::announce(RuntimeOrigin::signed(3), cid.clone())); - assert_eq!(Alliance::announcements(), vec![cid.clone()]); + assert_eq!(alliance::Announcements::::get(), vec![cid.clone()]); System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::Announced { announcement: cid.clone(), })); @@ -351,7 +351,7 @@ fn 
remove_announcement_works() { System::set_block_number(2); assert_ok!(Alliance::remove_announcement(RuntimeOrigin::signed(3), cid.clone())); - assert_eq!(Alliance::announcements(), vec![]); + assert_eq!(alliance::Announcements::::get(), vec![]); System::assert_last_event(mock::RuntimeEvent::Alliance( crate::Event::AnnouncementRemoved { announcement: cid }, )); @@ -394,8 +394,8 @@ fn join_alliance_works() { // success to submit assert_ok!(Alliance::join_alliance(RuntimeOrigin::signed(4))); assert_eq!(Balances::free_balance(4), 1000 - id_deposit - join_deposit); - assert_eq!(Alliance::deposit_of(4), Some(25)); - assert_eq!(Alliance::members(MemberRole::Ally), vec![4]); + assert_eq!(alliance::DepositOf::::get(4), Some(25)); + assert_eq!(alliance::Members::::get(MemberRole::Ally), vec![4]); // check already member assert_noop!( @@ -449,8 +449,8 @@ fn nominate_ally_works() { // success to nominate assert_ok!(Alliance::nominate_ally(RuntimeOrigin::signed(1), 4)); - assert_eq!(Alliance::deposit_of(4), None); - assert_eq!(Alliance::members(MemberRole::Ally), vec![4]); + assert_eq!(alliance::DepositOf::::get(4), None); + assert_eq!(alliance::Members::::get(MemberRole::Ally), vec![4]); // check already member assert_noop!( @@ -482,12 +482,12 @@ fn elevate_ally_works() { ); assert_ok!(Alliance::join_alliance(RuntimeOrigin::signed(4))); - assert_eq!(Alliance::members(MemberRole::Ally), vec![4]); - assert_eq!(Alliance::members(MemberRole::Fellow), vec![1, 2, 3]); + assert_eq!(alliance::Members::::get(MemberRole::Ally), vec![4]); + assert_eq!(alliance::Members::::get(MemberRole::Fellow), vec![1, 2, 3]); assert_ok!(Alliance::elevate_ally(RuntimeOrigin::signed(2), 4)); - assert_eq!(Alliance::members(MemberRole::Ally), Vec::::new()); - assert_eq!(Alliance::members(MemberRole::Fellow), vec![1, 2, 3, 4]); + assert_eq!(alliance::Members::::get(MemberRole::Ally), Vec::::new()); + assert_eq!(alliance::Members::::get(MemberRole::Fellow), vec![1, 2, 3, 4]); }); } @@ -499,10 +499,10 @@ fn give_retirement_notice_work() { Error::::NotMember ); - assert_eq!(Alliance::members(MemberRole::Fellow), vec![1, 2, 3]); + assert_eq!(alliance::Members::::get(MemberRole::Fellow), vec![1, 2, 3]); assert_ok!(Alliance::give_retirement_notice(RuntimeOrigin::signed(3))); - assert_eq!(Alliance::members(MemberRole::Fellow), vec![1, 2]); - assert_eq!(Alliance::members(MemberRole::Retiring), vec![3]); + assert_eq!(alliance::Members::::get(MemberRole::Fellow), vec![1, 2]); + assert_eq!(alliance::Members::::get(MemberRole::Retiring), vec![3]); System::assert_last_event(mock::RuntimeEvent::Alliance( crate::Event::MemberRetirementPeriodStarted { member: (3) }, )); @@ -527,7 +527,7 @@ fn retire_works() { Error::::RetirementNoticeNotGiven ); - assert_eq!(Alliance::members(MemberRole::Fellow), vec![1, 2, 3]); + assert_eq!(alliance::Members::::get(MemberRole::Fellow), vec![1, 2, 3]); assert_ok!(Alliance::give_retirement_notice(RuntimeOrigin::signed(3))); assert_noop!( Alliance::retire(RuntimeOrigin::signed(3)), @@ -535,7 +535,7 @@ fn retire_works() { ); System::set_block_number(System::block_number() + RetirementPeriod::get()); assert_ok!(Alliance::retire(RuntimeOrigin::signed(3))); - assert_eq!(Alliance::members(MemberRole::Fellow), vec![1, 2]); + assert_eq!(alliance::Members::::get(MemberRole::Fellow), vec![1, 2]); System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::MemberRetired { member: (3), unreserved: None, @@ -551,7 +551,7 @@ fn retire_works() { #[test] fn abdicate_works() { new_test_ext().execute_with(|| { - 
assert_eq!(Alliance::members(MemberRole::Fellow), vec![1, 2, 3]);
+		assert_eq!(alliance::Members::<Test>::get(MemberRole::Fellow), vec![1, 2, 3]);
 
 		assert_ok!(Alliance::abdicate_fellow_status(RuntimeOrigin::signed(3)));
 
 		System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::FellowAbdicated {
@@ -573,9 +573,9 @@ fn kick_member_works() {
 		);
 
 		<DepositOf<Test, ()>>::insert(2, 25);
-		assert_eq!(Alliance::members(MemberRole::Fellow), vec![1, 2, 3]);
+		assert_eq!(alliance::Members::<Test>::get(MemberRole::Fellow), vec![1, 2, 3]);
 		assert_ok!(Alliance::kick_member(RuntimeOrigin::signed(2), 2));
-		assert_eq!(Alliance::members(MemberRole::Fellow), vec![1, 3]);
+		assert_eq!(alliance::Members::<Test>::get(MemberRole::Fellow), vec![1, 3]);
 		assert_eq!(<DepositOf<Test, ()>>::get(2), None);
 		System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::MemberKicked {
 			member: (2),
@@ -596,8 +596,11 @@ fn add_unscrupulous_items_works() {
 			UnscrupulousItem::Website("abc".as_bytes().to_vec().try_into().unwrap())
 		]
 		));
-		assert_eq!(Alliance::unscrupulous_accounts().into_inner(), vec![3]);
-		assert_eq!(Alliance::unscrupulous_websites().into_inner(), vec!["abc".as_bytes().to_vec()]);
+		assert_eq!(alliance::UnscrupulousAccounts::<Test>::get().into_inner(), vec![3]);
+		assert_eq!(
+			alliance::UnscrupulousWebsites::<Test>::get().into_inner(),
+			vec!["abc".as_bytes().to_vec()]
+		);
 
 		assert_noop!(
 			Alliance::add_unscrupulous_items(
@@ -629,12 +632,12 @@ fn remove_unscrupulous_items_works() {
 			RuntimeOrigin::signed(3),
 			vec![UnscrupulousItem::AccountId(3)]
 		));
-		assert_eq!(Alliance::unscrupulous_accounts(), vec![3]);
+		assert_eq!(alliance::UnscrupulousAccounts::<Test>::get(), vec![3]);
 		assert_ok!(Alliance::remove_unscrupulous_items(
 			RuntimeOrigin::signed(3),
 			vec![UnscrupulousItem::AccountId(3)]
		));
-		assert_eq!(Alliance::unscrupulous_accounts(), Vec::<u64>::new());
+		assert_eq!(alliance::UnscrupulousAccounts::<Test>::get(), Vec::<u64>::new());
 	});
 }
-- 
GitLab

From e0c081dbd46c1e6edca1ce2c62298f5f3622afdd Mon Sep 17 00:00:00 2001
From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com>
Date: Mon, 1 Apr 2024 09:29:22 +0300
Subject: [PATCH 067/128] network:bridge: fix peer_count metric (#3711)

The `peer_count` metric was being set, for the protocol version of the peer
that just connected, to `peer_map.len()` — the number of *all* connected
peers regardless of version. As a result the metric was wrong: it always
recorded the total number of peers instead of how many peers we have
connected per version.

Fix this by counting peers by version inside `peer_map`. Additionally,
because that is a bit heavier than `len()`, publish the counts only on
active leaves.
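In isolation, the intended counting semantics look like this — a self-contained sketch using a simplified stand-in for the bridge's `PeerData` (the real type also carries a peer's view):

```rust
use std::collections::HashMap;

// Simplified stand-in for the bridge's `PeerData`.
struct PeerData {
	version: u32,
}

fn count_peers_by_version(peers: &HashMap<u64, PeerData>) -> HashMap<u32, usize> {
	let mut by_version_count = HashMap::new();
	for peer in peers.values() {
		*(by_version_count.entry(peer.version).or_default()) += 1;
	}
	by_version_count
}

fn main() {
	let peers: HashMap<u64, PeerData> = HashMap::from([
		(1, PeerData { version: 1 }),
		(2, PeerData { version: 2 }),
		(3, PeerData { version: 2 }),
	]);
	let counts = count_peers_by_version(&peers);
	// Two peers on version 2 — not `peers.len()` reported for every version.
	assert_eq!(counts[&2], 2);
	assert_eq!(counts[&1], 1);
}
```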
---------

Signed-off-by: Alexandru Gheorghe
---
 polkadot/node/network/bridge/src/lib.rs    | 24 ++++++++++++++++++++++
 polkadot/node/network/bridge/src/rx/mod.rs |  6 +-----
 2 files changed, 25 insertions(+), 5 deletions(-)

diff --git a/polkadot/node/network/bridge/src/lib.rs b/polkadot/node/network/bridge/src/lib.rs
index ddce99d5c2a..0305aaa067c 100644
--- a/polkadot/node/network/bridge/src/lib.rs
+++ b/polkadot/node/network/bridge/src/lib.rs
@@ -102,6 +102,30 @@ struct SharedInner {
 	collation_peers: HashMap<PeerId, PeerData>,
 }
 
+// Counts the number of peers connected with each protocol `version`.
+fn count_peers_by_version(peers: &HashMap<PeerId, PeerData>) -> HashMap<ProtocolVersion, usize> {
+	let mut by_version_count = HashMap::new();
+	for peer in peers.values() {
+		*(by_version_count.entry(peer.version).or_default()) += 1;
+	}
+	by_version_count
+}
+
+// Notes the per-version peer counts in the metrics.
+fn note_peers_count(metrics: &Metrics, shared: &Shared) {
+	let guard = shared.0.lock();
+	let validation_stats = count_peers_by_version(&guard.validation_peers);
+	let collation_stats = count_peers_by_version(&guard.collation_peers);
+
+	for (version, count) in validation_stats {
+		metrics.note_peer_count(PeerSet::Validation, version, count)
+	}
+
+	for (version, count) in collation_stats {
+		metrics.note_peer_count(PeerSet::Collation, version, count)
+	}
+}
+
 pub(crate) enum Mode {
 	Syncing(Box<dyn SyncOracle + Send>),
 	Active,
diff --git a/polkadot/node/network/bridge/src/rx/mod.rs b/polkadot/node/network/bridge/src/rx/mod.rs
index 11ac73259e3..0a4497fc4b5 100644
--- a/polkadot/node/network/bridge/src/rx/mod.rs
+++ b/polkadot/node/network/bridge/src/rx/mod.rs
@@ -262,7 +262,6 @@ async fn handle_validation_message(
 				}
 
 				metrics.on_peer_connected(peer_set, version);
-				metrics.note_peer_count(peer_set, version, peer_map.len());
 
 				shared.local_view.clone().unwrap_or(View::default())
 			};
@@ -320,8 +319,6 @@ async fn handle_validation_message(
 				let w = peer_map.remove(&peer).is_some();
 
 				metrics.on_peer_disconnected(peer_set, version);
-				metrics.note_peer_count(peer_set, version, peer_map.len());
-
 				w
 			};
@@ -524,7 +521,6 @@ async fn handle_collation_message(
 				}
 
 				metrics.on_peer_connected(peer_set, version);
-				metrics.note_peer_count(peer_set, version, peer_map.len());
 
 				shared.local_view.clone().unwrap_or(View::default())
 			};
@@ -575,7 +571,6 @@ async fn handle_collation_message(
 				let w = peer_map.remove(&peer).is_some();
 
 				metrics.on_peer_disconnected(peer_set, version);
-				metrics.note_peer_count(peer_set, version, peer_map.len());
 				w
 			};
@@ -832,6 +827,7 @@ where
 						&metrics,
 						&notification_sinks,
 					);
+					note_peers_count(&metrics, &shared);
 				}
 			}
 		},
-- 
GitLab

From 8d305343c7a488d3e8504600b5d9f94af899f25b Mon Sep 17 00:00:00 2001
From: Alessandro Siniscalchi
Date: Mon, 1 Apr 2024 10:54:23 +0200
Subject: [PATCH 068/128] [parachain-template] pallet configurations into `mod configs` (#3809)

This PR introduces a refactor of the parachain runtime configuration by
consolidating all pallet configurations into a new module named `configs`.
This change aims to improve the readability and maintainability of the
runtime configuration by centralizing all configuration parameters.

## Changes

- **Creation of `configs.rs`**: A new file `configs.rs` has been added under
  `templates/parachain/runtime/src/`, containing all the runtime
  configurations previously scattered across `lib.rs`.
- **Refactoring of `lib.rs`**: The `lib.rs` file has been significantly
  slimmed down by removing the inline pallet configurations and importing
  them from `configs.rs` instead.
- **Optimization of Import Statements**: Reorganized import statements to clarify the runtime's dependency structure. ### Benefits - **Improved Readability**: With configurations being centralized, developers can now easily locate and review runtime parameters without navigating through the `lib.rs` file. This refactor does not introduce any changes to the runtime logic but improves the project structure for better development experience. --------- Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> --- templates/parachain/runtime/src/apis.rs | 2 +- .../parachain/runtime/src/configs/mod.rs | 311 ++++++++++++++++++ .../runtime/src/{ => configs}/xcm_config.rs | 2 +- templates/parachain/runtime/src/lib.rs | 276 +--------------- 4 files changed, 317 insertions(+), 274 deletions(-) create mode 100644 templates/parachain/runtime/src/configs/mod.rs rename templates/parachain/runtime/src/{ => configs}/xcm_config.rs (99%) diff --git a/templates/parachain/runtime/src/apis.rs b/templates/parachain/runtime/src/apis.rs index aa0cae843c3..74c7476e152 100644 --- a/templates/parachain/runtime/src/apis.rs +++ b/templates/parachain/runtime/src/apis.rs @@ -193,7 +193,7 @@ impl_runtime_apis! { #[cfg(feature = "try-runtime")] impl frame_try_runtime::TryRuntime for Runtime { fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { - use super::RuntimeBlockWeights; + use super::configs::RuntimeBlockWeights; let weight = Executive::try_runtime_upgrade(checks).unwrap(); (weight, RuntimeBlockWeights::get().max_block) diff --git a/templates/parachain/runtime/src/configs/mod.rs b/templates/parachain/runtime/src/configs/mod.rs new file mode 100644 index 00000000000..e2c51e07d37 --- /dev/null +++ b/templates/parachain/runtime/src/configs/mod.rs @@ -0,0 +1,311 @@ +// This is free and unencumbered software released into the public domain. +// +// Anyone is free to copy, modify, publish, use, compile, sell, or +// distribute this software, either in source code form or as a compiled +// binary, for any purpose, commercial or non-commercial, and by any +// means. +// +// In jurisdictions that recognize copyright laws, the author or authors +// of this software dedicate any and all copyright interest in the +// software to the public domain. We make this dedication for the benefit +// of the public at large and to the detriment of our heirs and +// successors. We intend this dedication to be an overt act of +// relinquishment in perpetuity of all present and future rights to this +// software under copyright law. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +// IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +// OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +// OTHER DEALINGS IN THE SOFTWARE. 
+// +// For more information, please refer to + +mod xcm_config; + +// Substrate and Polkadot dependencies +use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; +use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; +use frame_support::{ + derive_impl, + dispatch::DispatchClass, + parameter_types, + traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, TransformOrigin}, + weights::{ConstantMultiplier, Weight}, + PalletId, +}; +use frame_system::{ + limits::{BlockLength, BlockWeights}, + EnsureRoot, +}; +use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; +use parachains_common::message_queue::{NarrowOriginToSibling, ParaIdToSibling}; +use polkadot_runtime_common::{ + xcm_sender::NoPriceForMessageDelivery, BlockHashCount, SlowAdjustingFeeUpdate, +}; +use sp_consensus_aura::sr25519::AuthorityId as AuraId; +use sp_runtime::Perbill; +use sp_version::RuntimeVersion; +use xcm::latest::prelude::BodyId; + +// Local module imports +use super::{ + weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}, + AccountId, Aura, Balance, Balances, Block, BlockNumber, CollatorSelection, Hash, MessageQueue, + Nonce, PalletInfo, ParachainSystem, Runtime, RuntimeCall, RuntimeEvent, RuntimeFreezeReason, + RuntimeHoldReason, RuntimeOrigin, RuntimeTask, Session, SessionKeys, System, WeightToFee, + XcmpQueue, AVERAGE_ON_INITIALIZE_RATIO, BLOCK_PROCESSING_VELOCITY, EXISTENTIAL_DEPOSIT, HOURS, + MAXIMUM_BLOCK_WEIGHT, MICROUNIT, NORMAL_DISPATCH_RATIO, RELAY_CHAIN_SLOT_DURATION_MILLIS, + SLOT_DURATION, UNINCLUDED_SEGMENT_CAPACITY, VERSION, +}; +use xcm_config::{RelayLocation, XcmOriginToTransactDispatchOrigin}; + +parameter_types! { + pub const Version: RuntimeVersion = VERSION; + + // This part is copied from Substrate's `bin/node/runtime/src/lib.rs`. + // The `RuntimeBlockLength` and `RuntimeBlockWeights` exist here because the + // `DeletionWeightLimit` and `DeletionQueueDepth` depend on those to parameterize + // the lazy contract deletion. + pub RuntimeBlockLength: BlockLength = + BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); + pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() + .base_block(BlockExecutionWeight::get()) + .for_class(DispatchClass::all(), |weights| { + weights.base_extrinsic = ExtrinsicBaseWeight::get(); + }) + .for_class(DispatchClass::Normal, |weights| { + weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); + }) + .for_class(DispatchClass::Operational, |weights| { + weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); + // Operational transactions have some extra reserved space, so that they + // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. + weights.reserved = Some( + MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT + ); + }) + .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) + .build_or_panic(); + pub const SS58Prefix: u16 = 42; +} + +/// The default types are being injected by [`derive_impl`](`frame_support::derive_impl`) from +/// [`ParaChainDefaultConfig`](`struct@frame_system::config_preludes::ParaChainDefaultConfig`), +/// but overridden as needed. +#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig)] +impl frame_system::Config for Runtime { + /// The identifier used to distinguish between accounts. + type AccountId = AccountId; + /// The index type for storing how many extrinsics an account has signed. + type Nonce = Nonce; + /// The type for hashing blocks and tries. + type Hash = Hash; + /// The block type. 
+ type Block = Block; + /// Maximum number of block number to block hash mappings to keep (oldest pruned first). + type BlockHashCount = BlockHashCount; + /// Runtime version. + type Version = Version; + /// The data to be stored in an account. + type AccountData = pallet_balances::AccountData; + /// The weight of database operations that the runtime can invoke. + type DbWeight = RocksDbWeight; + /// Block & extrinsics weights: base values and limits. + type BlockWeights = RuntimeBlockWeights; + /// The maximum length of a block (in bytes). + type BlockLength = RuntimeBlockLength; + /// This is used as an identifier of the chain. 42 is the generic substrate prefix. + type SS58Prefix = SS58Prefix; + /// The action to take on a Runtime Upgrade + type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +impl pallet_timestamp::Config for Runtime { + /// A timestamp: milliseconds since the unix epoch. + type Moment = u64; + type OnTimestampSet = Aura; + type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>; + type WeightInfo = (); +} + +impl pallet_authorship::Config for Runtime { + type FindAuthor = pallet_session::FindAccountFromAuthorIndex; + type EventHandler = (CollatorSelection,); +} + +parameter_types! { + pub const ExistentialDeposit: Balance = EXISTENTIAL_DEPOSIT; +} + +impl pallet_balances::Config for Runtime { + type MaxLocks = ConstU32<50>; + /// The type for recording an account's balance. + type Balance = Balance; + /// The ubiquitous event type. + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = pallet_balances::weights::SubstrateWeight; + type MaxReserves = ConstU32<50>; + type ReserveIdentifier = [u8; 8]; + type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; + type FreezeIdentifier = (); + type MaxFreezes = ConstU32<0>; +} + +parameter_types! { + /// Relay Chain `TransactionByteFee` / 10 + pub const TransactionByteFee: Balance = 10 * MICROUNIT; +} + +impl pallet_transaction_payment::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type OnChargeTransaction = pallet_transaction_payment::CurrencyAdapter; + type WeightToFee = WeightToFee; + type LengthToFee = ConstantMultiplier; + type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type OperationalFeeMultiplier = ConstU8<5>; +} + +impl pallet_sudo::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type WeightInfo = (); +} + +parameter_types! 
{ + pub const ReservedXcmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); + pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); + pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; +} + +impl cumulus_pallet_parachain_system::Config for Runtime { + type WeightInfo = (); + type RuntimeEvent = RuntimeEvent; + type OnSystemEvent = (); + type SelfParaId = parachain_info::Pallet; + type OutboundXcmpMessageSource = XcmpQueue; + type DmpQueue = frame_support::traits::EnqueueWithOrigin; + type ReservedDmpWeight = ReservedDmpWeight; + type XcmpMessageHandler = XcmpQueue; + type ReservedXcmpWeight = ReservedXcmpWeight; + type CheckAssociatedRelayNumber = RelayNumberStrictlyIncreases; + type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< + Runtime, + RELAY_CHAIN_SLOT_DURATION_MILLIS, + BLOCK_PROCESSING_VELOCITY, + UNINCLUDED_SEGMENT_CAPACITY, + >; +} + +impl parachain_info::Config for Runtime {} + +parameter_types! { + pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block; +} + +impl pallet_message_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = (); + #[cfg(feature = "runtime-benchmarks")] + type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor< + cumulus_primitives_core::AggregateMessageOrigin, + >; + #[cfg(not(feature = "runtime-benchmarks"))] + type MessageProcessor = xcm_builder::ProcessXcmMessage< + AggregateMessageOrigin, + xcm_executor::XcmExecutor, + RuntimeCall, + >; + type Size = u32; + // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: + type QueueChangeHandler = NarrowOriginToSibling; + type QueuePausedQuery = NarrowOriginToSibling; + type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type MaxStale = sp_core::ConstU32<8>; + type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = (); +} + +impl cumulus_pallet_aura_ext::Config for Runtime {} + +impl cumulus_pallet_xcmp_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type ChannelInfo = ParachainSystem; + type VersionWrapper = (); + // Enqueue XCMP messages from siblings for later processing. + type XcmpQueue = TransformOrigin; + type MaxInboundSuspended = sp_core::ConstU32<1_000>; + type ControllerOrigin = EnsureRoot; + type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; + type WeightInfo = (); + type PriceForSiblingDelivery = NoPriceForMessageDelivery; +} + +parameter_types! { + pub const Period: u32 = 6 * HOURS; + pub const Offset: u32 = 0; +} + +impl pallet_session::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type ValidatorId = ::AccountId; + // we don't have stash and controller, thus we don't need the convert as well. + type ValidatorIdOf = pallet_collator_selection::IdentityCollator; + type ShouldEndSession = pallet_session::PeriodicSessions; + type NextSessionRotation = pallet_session::PeriodicSessions; + type SessionManager = CollatorSelection; + // Essentially just Aura, but let's be pedantic. + type SessionHandler = ::KeyTypeIdProviders; + type Keys = SessionKeys; + type WeightInfo = (); +} + +impl pallet_aura::Config for Runtime { + type AuthorityId = AuraId; + type DisabledValidators = (); + type MaxAuthorities = ConstU32<100_000>; + type AllowMultipleBlocksPerSlot = ConstBool; + type SlotDuration = pallet_aura::MinimumPeriodTimesTwo; +} + +parameter_types! 
{ + pub const PotId: PalletId = PalletId(*b"PotStake"); + pub const SessionLength: BlockNumber = 6 * HOURS; + // StakingAdmin pluralistic body. + pub const StakingAdminBodyId: BodyId = BodyId::Defense; +} + +/// We allow root and the StakingAdmin to execute privileged collator selection operations. +pub type CollatorSelectionUpdateOrigin = EitherOfDiverse< + EnsureRoot, + EnsureXcm>, +>; + +impl pallet_collator_selection::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type UpdateOrigin = CollatorSelectionUpdateOrigin; + type PotId = PotId; + type MaxCandidates = ConstU32<100>; + type MinEligibleCollators = ConstU32<4>; + type MaxInvulnerables = ConstU32<20>; + // should be a multiple of session or things will get inconsistent + type KickThreshold = Period; + type ValidatorId = ::AccountId; + type ValidatorIdOf = pallet_collator_selection::IdentityCollator; + type ValidatorRegistration = Session; + type WeightInfo = (); +} + +/// Configure the pallet template in pallets/template. +impl pallet_parachain_template::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = pallet_parachain_template::weights::SubstrateWeight; +} diff --git a/templates/parachain/runtime/src/xcm_config.rs b/templates/parachain/runtime/src/configs/xcm_config.rs similarity index 99% rename from templates/parachain/runtime/src/xcm_config.rs rename to templates/parachain/runtime/src/configs/xcm_config.rs index 7dce7164888..13da2363b05 100644 --- a/templates/parachain/runtime/src/xcm_config.rs +++ b/templates/parachain/runtime/src/configs/xcm_config.rs @@ -1,4 +1,4 @@ -use super::{ +use crate::{ AccountId, AllPalletsWithSystem, Balances, ParachainInfo, ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, WeightToFee, XcmpQueue, }; diff --git a/templates/parachain/runtime/src/lib.rs b/templates/parachain/runtime/src/lib.rs index 5cfee123b01..e2da9309ecc 100644 --- a/templates/parachain/runtime/src/lib.rs +++ b/templates/parachain/runtime/src/lib.rs @@ -7,11 +7,9 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); pub mod apis; +mod configs; mod weights; -pub mod xcm_config; -use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; -use polkadot_runtime_common::xcm_sender::NoPriceForMessageDelivery; use smallvec::smallvec; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, @@ -24,38 +22,20 @@ use sp_std::prelude::*; use sp_version::NativeVersion; use sp_version::RuntimeVersion; -use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; use frame_support::{ - construct_runtime, derive_impl, - dispatch::DispatchClass, - parameter_types, - traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, TransformOrigin}, + construct_runtime, weights::{ - constants::WEIGHT_REF_TIME_PER_SECOND, ConstantMultiplier, Weight, WeightToFeeCoefficient, + constants::WEIGHT_REF_TIME_PER_SECOND, Weight, WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial, }, - PalletId, }; -use frame_system::{ - limits::{BlockLength, BlockWeights}, - EnsureRoot, -}; -use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; -use parachains_common::message_queue::{NarrowOriginToSibling, ParaIdToSibling}; pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; pub use sp_runtime::{MultiAddress, Perbill, Permill}; -use xcm_config::{RelayLocation, XcmOriginToTransactDispatchOrigin}; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; -// Polkadot imports -use polkadot_runtime_common::{BlockHashCount, 
SlowAdjustingFeeUpdate}; - -use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; - -// XCM Imports -use xcm::latest::prelude::BodyId; +use weights::ExtrinsicBaseWeight; /// Import the template pallet. pub use pallet_parachain_template; @@ -241,254 +221,6 @@ pub fn native_version() -> NativeVersion { NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } } -parameter_types! { - pub const Version: RuntimeVersion = VERSION; - - // This part is copied from Substrate's `bin/node/runtime/src/lib.rs`. - // The `RuntimeBlockLength` and `RuntimeBlockWeights` exist here because the - // `DeletionWeightLimit` and `DeletionQueueDepth` depend on those to parameterize - // the lazy contract deletion. - pub RuntimeBlockLength: BlockLength = - BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); - pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() - .base_block(BlockExecutionWeight::get()) - .for_class(DispatchClass::all(), |weights| { - weights.base_extrinsic = ExtrinsicBaseWeight::get(); - }) - .for_class(DispatchClass::Normal, |weights| { - weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); - }) - .for_class(DispatchClass::Operational, |weights| { - weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); - // Operational transactions have some extra reserved space, so that they - // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. - weights.reserved = Some( - MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT - ); - }) - .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) - .build_or_panic(); - pub const SS58Prefix: u16 = 42; -} - -/// The default types are being injected by [`derive_impl`](`frame_support::derive_impl`) from -/// [`ParaChainDefaultConfig`](`struct@frame_system::config_preludes::ParaChainDefaultConfig`), -/// but overridden as needed. -#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig)] -impl frame_system::Config for Runtime { - /// The identifier used to distinguish between accounts. - type AccountId = AccountId; - /// The index type for storing how many extrinsics an account has signed. - type Nonce = Nonce; - /// The type for hashing blocks and tries. - type Hash = Hash; - /// The block type. - type Block = Block; - /// Maximum number of block number to block hash mappings to keep (oldest pruned first). - type BlockHashCount = BlockHashCount; - /// Runtime version. - type Version = Version; - /// The data to be stored in an account. - type AccountData = pallet_balances::AccountData; - /// The weight of database operations that the runtime can invoke. - type DbWeight = RocksDbWeight; - /// Block & extrinsics weights: base values and limits. - type BlockWeights = RuntimeBlockWeights; - /// The maximum length of a block (in bytes). - type BlockLength = RuntimeBlockLength; - /// This is used as an identifier of the chain. 42 is the generic substrate prefix. - type SS58Prefix = SS58Prefix; - /// The action to take on a Runtime Upgrade - type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; - type MaxConsumers = frame_support::traits::ConstU32<16>; -} - -impl pallet_timestamp::Config for Runtime { - /// A timestamp: milliseconds since the unix epoch. 
- type Moment = u64; - type OnTimestampSet = Aura; - type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>; - type WeightInfo = (); -} - -impl pallet_authorship::Config for Runtime { - type FindAuthor = pallet_session::FindAccountFromAuthorIndex; - type EventHandler = (CollatorSelection,); -} - -parameter_types! { - pub const ExistentialDeposit: Balance = EXISTENTIAL_DEPOSIT; -} - -impl pallet_balances::Config for Runtime { - type MaxLocks = ConstU32<50>; - /// The type for recording an account's balance. - type Balance = Balance; - /// The ubiquitous event type. - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - type WeightInfo = pallet_balances::weights::SubstrateWeight; - type MaxReserves = ConstU32<50>; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; -} - -parameter_types! { - /// Relay Chain `TransactionByteFee` / 10 - pub const TransactionByteFee: Balance = 10 * MICROUNIT; -} - -impl pallet_transaction_payment::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type OnChargeTransaction = pallet_transaction_payment::CurrencyAdapter; - type WeightToFee = WeightToFee; - type LengthToFee = ConstantMultiplier; - type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; - type OperationalFeeMultiplier = ConstU8<5>; -} - -impl pallet_sudo::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; - type WeightInfo = (); -} - -parameter_types! { - pub const ReservedXcmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); - pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); - pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; -} - -impl cumulus_pallet_parachain_system::Config for Runtime { - type WeightInfo = (); - type RuntimeEvent = RuntimeEvent; - type OnSystemEvent = (); - type SelfParaId = parachain_info::Pallet; - type OutboundXcmpMessageSource = XcmpQueue; - type DmpQueue = frame_support::traits::EnqueueWithOrigin; - type ReservedDmpWeight = ReservedDmpWeight; - type XcmpMessageHandler = XcmpQueue; - type ReservedXcmpWeight = ReservedXcmpWeight; - type CheckAssociatedRelayNumber = RelayNumberStrictlyIncreases; - type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< - Runtime, - RELAY_CHAIN_SLOT_DURATION_MILLIS, - BLOCK_PROCESSING_VELOCITY, - UNINCLUDED_SEGMENT_CAPACITY, - >; -} - -impl parachain_info::Config for Runtime {} - -parameter_types! 
{ - pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block; -} - -impl pallet_message_queue::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = (); - #[cfg(feature = "runtime-benchmarks")] - type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor< - cumulus_primitives_core::AggregateMessageOrigin, - >; - #[cfg(not(feature = "runtime-benchmarks"))] - type MessageProcessor = xcm_builder::ProcessXcmMessage< - AggregateMessageOrigin, - xcm_executor::XcmExecutor, - RuntimeCall, - >; - type Size = u32; - // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: - type QueueChangeHandler = NarrowOriginToSibling; - type QueuePausedQuery = NarrowOriginToSibling; - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; - type MaxStale = sp_core::ConstU32<8>; - type ServiceWeight = MessageQueueServiceWeight; - type IdleMaxServiceWeight = (); -} - -impl cumulus_pallet_aura_ext::Config for Runtime {} - -impl cumulus_pallet_xcmp_queue::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type ChannelInfo = ParachainSystem; - type VersionWrapper = (); - // Enqueue XCMP messages from siblings for later processing. - type XcmpQueue = TransformOrigin; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; - type ControllerOrigin = EnsureRoot; - type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; - type WeightInfo = (); - type PriceForSiblingDelivery = NoPriceForMessageDelivery; -} - -parameter_types! { - pub const Period: u32 = 6 * HOURS; - pub const Offset: u32 = 0; -} - -impl pallet_session::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type ValidatorId = ::AccountId; - // we don't have stash and controller, thus we don't need the convert as well. - type ValidatorIdOf = pallet_collator_selection::IdentityCollator; - type ShouldEndSession = pallet_session::PeriodicSessions; - type NextSessionRotation = pallet_session::PeriodicSessions; - type SessionManager = CollatorSelection; - // Essentially just Aura, but let's be pedantic. - type SessionHandler = ::KeyTypeIdProviders; - type Keys = SessionKeys; - type WeightInfo = (); -} - -impl pallet_aura::Config for Runtime { - type AuthorityId = AuraId; - type DisabledValidators = (); - type MaxAuthorities = ConstU32<100_000>; - type AllowMultipleBlocksPerSlot = ConstBool; - type SlotDuration = pallet_aura::MinimumPeriodTimesTwo; -} - -parameter_types! { - pub const PotId: PalletId = PalletId(*b"PotStake"); - pub const SessionLength: BlockNumber = 6 * HOURS; - // StakingAdmin pluralistic body. - pub const StakingAdminBodyId: BodyId = BodyId::Defense; -} - -/// We allow root and the StakingAdmin to execute privileged collator selection operations. -pub type CollatorSelectionUpdateOrigin = EitherOfDiverse< - EnsureRoot, - EnsureXcm>, ->; - -impl pallet_collator_selection::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type UpdateOrigin = CollatorSelectionUpdateOrigin; - type PotId = PotId; - type MaxCandidates = ConstU32<100>; - type MinEligibleCollators = ConstU32<4>; - type MaxInvulnerables = ConstU32<20>; - // should be a multiple of session or things will get inconsistent - type KickThreshold = Period; - type ValidatorId = ::AccountId; - type ValidatorIdOf = pallet_collator_selection::IdentityCollator; - type ValidatorRegistration = Session; - type WeightInfo = (); -} - -/// Configure the pallet template in pallets/template. 
-impl pallet_parachain_template::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = pallet_parachain_template::weights::SubstrateWeight; -} - // Create the runtime by composing the FRAME pallets that were previously configured. construct_runtime!( pub enum Runtime { -- GitLab From 07720dd1208e31f45cb20317cb15aa789428508b Mon Sep 17 00:00:00 2001 From: Andrei Sandu <54316454+sandreim@users.noreply.github.com> Date: Mon, 1 Apr 2024 12:23:44 +0300 Subject: [PATCH 069/128] Improve `HostConfiguration` consistency check (#3897) fixes https://github.com/paritytech/polkadot-sdk/issues/3886 --------- Signed-off-by: Andrei Sandu --- polkadot/runtime/parachains/src/configuration.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/polkadot/runtime/parachains/src/configuration.rs b/polkadot/runtime/parachains/src/configuration.rs index b7635dcd7b2..17022272c0c 100644 --- a/polkadot/runtime/parachains/src/configuration.rs +++ b/polkadot/runtime/parachains/src/configuration.rs @@ -187,7 +187,7 @@ pub struct HostConfiguration { /// /// Must be at least 1. pub no_show_slots: u32, - /// The number of delay tranches in total. + /// The number of delay tranches in total. Must be at least 1. pub n_delay_tranches: u32, /// The width of the zeroth delay tranche for approval assignments. This many delay tranches /// beyond 0 are all consolidated to form a wide 0 tranche. @@ -247,7 +247,7 @@ impl> Default for HostConfiguration { LookaheadExceedsTTL, /// Passed in queue size for on-demand was too large. OnDemandQueueSizeTooLarge, + /// Number of delay tranches cannot be 0. + ZeroDelayTranches, } impl HostConfiguration @@ -412,6 +414,10 @@ where return Err(OnDemandQueueSizeTooLarge) } + if self.n_delay_tranches.is_zero() { + return Err(ZeroDelayTranches) + } + Ok(()) } -- GitLab From b772cb576d03cf67dc5f8eaf19c07dc23b37d34a Mon Sep 17 00:00:00 2001 From: Ross Bulat Date: Mon, 1 Apr 2024 16:35:36 +0700 Subject: [PATCH 070/128] Pools: Make `PermissionlessWithdraw` the default claim permission (#3438) Related Issue https://github.com/paritytech/polkadot-sdk/issues/3398 This PR makes permissionless withdrawing the default option, giving any network participant access to claim pool rewards on member's behalf. Of course, members can still opt out of this by setting a `Permissioned` claim permission. Permissionless claiming has been a part of the nomination pool pallet for around 9 months now, with very limited uptake (~4% of total pool members). 1.6% of pool members are using `PermissionlessAll`, strongly suggesting it is not wanted - it is too ambiguous and doesn't provide guidance to claimers. Stakers expect rewards to be claimed on their behalf by default - I have expanded upon this in detail within the [accompanying issue's discussion](https://github.com/paritytech/polkadot-sdk/issues/3398). Other protocols have this behaviour, whereby staking rewards are received without the staker having to take any action. From this perspective, permissionless claiming is not intuitive for pool members. As evidence of this, over 150,000 DOT is currently unclaimed on Polkadot, and is growing at a non-linear rate. 
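To see the new default in action, here is an illustrative test-style sketch (names follow `pallet-nomination-pools` and its mock runtime; `claimer` and `member` are placeholder accounts):

```rust
// With `PermissionlessWithdraw` as the default claim permission, any signed
// origin may claim a member's pending rewards on the member's behalf:
assert_ok!(Pools::claim_payout_other(RuntimeOrigin::signed(claimer), member));

// A member that prefers the previous behaviour opts out explicitly; afterwards
// only the member themselves may claim:
assert_ok!(Pools::set_claim_permission(
	RuntimeOrigin::signed(member),
	ClaimPermission::Permissioned,
));
assert_noop!(
	Pools::claim_payout_other(RuntimeOrigin::signed(claimer), member),
	Error::<Runtime>::DoesNotHavePermission,
);
```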
--- prdoc/pr_3438.prdoc | 13 +++++++ .../nomination-pools/benchmarking/src/lib.rs | 4 +- substrate/frame/nomination-pools/src/lib.rs | 31 ++++++++------- substrate/frame/nomination-pools/src/tests.rs | 39 +++++++------------ 4 files changed, 45 insertions(+), 42 deletions(-) create mode 100644 prdoc/pr_3438.prdoc diff --git a/prdoc/pr_3438.prdoc b/prdoc/pr_3438.prdoc new file mode 100644 index 00000000000..5f4a0e3d57a --- /dev/null +++ b/prdoc/pr_3438.prdoc @@ -0,0 +1,13 @@ +title: "Pools: Make PermissionlessWithdraw the default claim permission" + +doc: + - audience: Runtime User + description: | + Makes permissionless withdrawing the default claim permission, giving any network participant + access to claim pool rewards on member's behalf, by default. + +crates: + - name: pallet-nomination-pools + bump: minor + - name: pallet-nomination-pools-benchmarking + bump: minor diff --git a/substrate/frame/nomination-pools/benchmarking/src/lib.rs b/substrate/frame/nomination-pools/benchmarking/src/lib.rs index 48d7dae29ef..f7df173ec04 100644 --- a/substrate/frame/nomination-pools/benchmarking/src/lib.rs +++ b/substrate/frame/nomination-pools/benchmarking/src/lib.rs @@ -795,9 +795,9 @@ frame_benchmarking::benchmarks! { T::Staking::active_stake(&pool_account).unwrap(), min_create_bond + min_join_bond ); - }:_(RuntimeOrigin::Signed(joiner.clone()), ClaimPermission::PermissionlessAll) + }:_(RuntimeOrigin::Signed(joiner.clone()), ClaimPermission::Permissioned) verify { - assert_eq!(ClaimPermissions::::get(joiner), ClaimPermission::PermissionlessAll); + assert_eq!(ClaimPermissions::::get(joiner), ClaimPermission::Permissioned); } claim_commission { diff --git a/substrate/frame/nomination-pools/src/lib.rs b/substrate/frame/nomination-pools/src/lib.rs index f29a49a2b1b..23501cd89d2 100644 --- a/substrate/frame/nomination-pools/src/lib.rs +++ b/substrate/frame/nomination-pools/src/lib.rs @@ -461,22 +461,26 @@ pub enum ClaimPermission { PermissionlessAll, } +impl Default for ClaimPermission { + fn default() -> Self { + Self::PermissionlessWithdraw + } +} + impl ClaimPermission { + /// Permissionless compounding of pool rewards is allowed if the current permission is + /// `PermissionlessCompound`, or permissionless. fn can_bond_extra(&self) -> bool { matches!(self, ClaimPermission::PermissionlessAll | ClaimPermission::PermissionlessCompound) } + /// Permissionless payout claiming is allowed if the current permission is + /// `PermissionlessWithdraw`, or permissionless. fn can_claim_payout(&self) -> bool { matches!(self, ClaimPermission::PermissionlessAll | ClaimPermission::PermissionlessWithdraw) } } -impl Default for ClaimPermission { - fn default() -> Self { - Self::Permissioned - } -} - /// A member in a pool. #[derive( Encode, @@ -2630,7 +2634,7 @@ pub mod pallet { /// /// In the case of `origin != other`, `origin` can only bond extra pending rewards of /// `other` members assuming set_claim_permission for the given member is - /// `PermissionlessAll` or `PermissionlessCompound`. + /// `PermissionlessCompound` or `PermissionlessAll`. #[pallet::call_index(14)] #[pallet::weight( T::WeightInfo::bond_extra_transfer() @@ -2648,15 +2652,10 @@ pub mod pallet { /// Allows a pool member to set a claim permission to allow or disallow permissionless /// bonding and withdrawing. /// - /// By default, this is `Permissioned`, which implies only the pool member themselves can - /// claim their pending rewards. 
If a pool member wishes so, they can set this to - /// `PermissionlessAll` to allow any account to claim their rewards and bond extra to the - /// pool. - /// /// # Arguments /// /// * `origin` - Member of a pool. - /// * `actor` - Account to claim reward. // improve this + /// * `permission` - The permission to be applied. #[pallet::call_index(15)] #[pallet::weight(T::DbWeight::get().reads_writes(1, 1))] pub fn set_claim_permission( @@ -2666,16 +2665,18 @@ pub mod pallet { let who = ensure_signed(origin)?; ensure!(PoolMembers::::contains_key(&who), Error::::PoolMemberNotFound); + ClaimPermissions::::mutate(who, |source| { *source = permission; }); + Ok(()) } /// `origin` can claim payouts on some pool member `other`'s behalf. /// - /// Pool member `other` must have a `PermissionlessAll` or `PermissionlessWithdraw` in order - /// for this call to be successful. + /// Pool member `other` must have a `PermissionlessWithdraw` or `PermissionlessAll` claim + /// permission for this call to be successful. #[pallet::call_index(16)] #[pallet::weight(T::WeightInfo::claim_payout())] pub fn claim_payout_other(origin: OriginFor, other: T::AccountId) -> DispatchResult { diff --git a/substrate/frame/nomination-pools/src/tests.rs b/substrate/frame/nomination-pools/src/tests.rs index 8fb2b41b88a..32b3e9af3cd 100644 --- a/substrate/frame/nomination-pools/src/tests.rs +++ b/substrate/frame/nomination-pools/src/tests.rs @@ -2441,16 +2441,10 @@ mod claim_payout { // given assert_eq!(Currency::free_balance(&10), 35); - // Permissioned by default - assert_noop!( - Pools::claim_payout_other(RuntimeOrigin::signed(80), 10), - Error::::DoesNotHavePermission - ); + // when - assert_ok!(Pools::set_claim_permission( - RuntimeOrigin::signed(10), - ClaimPermission::PermissionlessWithdraw - )); + // NOTE: Claim permission of `PermissionlessWithdraw` allows payout claiming as default, + // so a claim permission does not need to be set for non-pool members prior to claiming. 
assert_ok!(Pools::claim_payout_other(RuntimeOrigin::signed(80), 10));
 
 		// then
@@ -2489,7 +2483,6 @@ mod unbond {
 			);
 
 			// Make permissionless
-			assert_eq!(ClaimPermissions::<Runtime>::get(10), ClaimPermission::Permissioned);
 			assert_ok!(Pools::set_claim_permission(
 				RuntimeOrigin::signed(20),
 				ClaimPermission::PermissionlessAll
 			));
@@ -4563,12 +4556,11 @@ mod withdraw_unbonded {
 			CurrentEra::set(1);
 			assert_eq!(PoolMembers::<Runtime>::get(20).unwrap().points, 20);
 
-			assert_ok!(Pools::set_claim_permission(
-				RuntimeOrigin::signed(20),
-				ClaimPermission::PermissionlessAll
-			));
 			assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 20));
-			assert_eq!(ClaimPermissions::<Runtime>::get(20), ClaimPermission::PermissionlessAll);
+			assert_eq!(
+				ClaimPermissions::<Runtime>::get(20),
+				ClaimPermission::PermissionlessWithdraw
+			);
 
 			assert_eq!(
 				pool_events_since_last_call(),
@@ -4792,7 +4784,7 @@ mod create {
 }
 
 #[test]
-fn set_claimable_actor_works() {
+fn set_claim_permission_works() {
 	ExtBuilder::default().build_and_execute(|| {
 		// Given
 		Currency::set_balance(&11, ExistentialDeposit::get() + 2);
@@ -4811,22 +4803,19 @@ fn set_claimable_actor_works() {
 			]
 		);
 
-		// Make permissionless
-		assert_eq!(ClaimPermissions::<Runtime>::get(11), ClaimPermission::Permissioned);
+		// Make permissioned
+		assert_eq!(ClaimPermissions::<Runtime>::get(11), ClaimPermission::PermissionlessWithdraw);
 		assert_noop!(
-			Pools::set_claim_permission(
-				RuntimeOrigin::signed(12),
-				ClaimPermission::PermissionlessAll
-			),
+			Pools::set_claim_permission(RuntimeOrigin::signed(12), ClaimPermission::Permissioned),
 			Error::<Runtime>::PoolMemberNotFound
 		);
 		assert_ok!(Pools::set_claim_permission(
 			RuntimeOrigin::signed(11),
-			ClaimPermission::PermissionlessAll
+			ClaimPermission::Permissioned
 		));
 
 		// then
-		assert_eq!(ClaimPermissions::<Runtime>::get(11), ClaimPermission::PermissionlessAll);
+		assert_eq!(ClaimPermissions::<Runtime>::get(11), ClaimPermission::Permissioned);
 	});
 }
 
@@ -5224,7 +5213,7 @@ mod bond_extra {
 
 		assert_ok!(Pools::set_claim_permission(
 			RuntimeOrigin::signed(10),
-			ClaimPermission::PermissionlessAll
+			ClaimPermission::PermissionlessCompound
 		));
 		assert_ok!(Pools::bond_extra_other(RuntimeOrigin::signed(50), 10, BondExtra::Rewards));
 		assert_eq!(Currency::free_balance(&default_reward_account()), 7);
-- 
GitLab

From e6bd9205432bb524e94c9bd13048d645ec9aa5c7 Mon Sep 17 00:00:00 2001
From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com>
Date: Mon, 1 Apr 2024 13:23:29 +0300
Subject: [PATCH 071/128] Fix 0007-dispute-freshly-finalized.zndsl failing (#3893)

The test started failing after
https://github.com/paritytech/polkadot-sdk/commit/66051adb619d2119771920218e2de75fa037d7e8,
which enabled approval coalescing. That was expected to happen, because the
test required a polkadot_parachain_approval_checking_finality_lag of 0,
which can't happen with max_approval_coalesce_count greater than 1: we
always delay an approval for up to no_show_duration_ticks/2 in case we can
coalesce it with other approvals.

So relax the restrictions a bit: we don't actually care that the lags are
exactly 0, only that finality keeps progressing and is not stuck.
Signed-off-by: Alexandru Gheorghe
---
 .../functional/0007-dispute-freshly-finalized.zndsl          | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.zndsl b/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.zndsl
index 62d5a9768f9..d1ed0250d4d 100644
--- a/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.zndsl
+++ b/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.zndsl
@@ -21,9 +21,9 @@ honest: reports polkadot_parachain_candidate_dispute_concluded{validity="valid"}
 honest: reports polkadot_parachain_candidate_dispute_concluded{validity="invalid"} is 0 within 100 seconds
 
 # Check lag - approval
-honest: reports polkadot_parachain_approval_checking_finality_lag is 0
+honest: reports polkadot_parachain_approval_checking_finality_lag is lower than 2
 
 # Check lag - dispute conclusion
-honest: reports polkadot_parachain_disputes_finality_lag is 0
+honest: reports polkadot_parachain_disputes_finality_lag is lower than 2
 
-- 
GitLab


From d6f68bb9062167537211cc05286809771fc8861a Mon Sep 17 00:00:00 2001
From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com>
Date: Mon, 1 Apr 2024 16:03:26 +0300
Subject: [PATCH 072/128] primitives: Move out of staging released APIs
 (#3925)

Runtime release 1.2 includes bumping of the ParachainHost APIs up to
v10, so let's move all the released APIs out of the vstaging folder.
This PR does not include any logic changes, only renaming of the
modules and some moving around.

Signed-off-by: Alexandru Gheorghe
---
 .../src/blockchain_rpc_client.rs              |  4 +-
 .../src/rpc_client.rs                         | 13 +++--
 .../node/core/approval-voting/src/import.rs   |  8 +--
 polkadot/node/core/approval-voting/src/lib.rs |  8 +--
 .../node/core/approval-voting/src/tests.rs    |  4 +-
 polkadot/node/core/backing/src/lib.rs         | 11 ++--
 polkadot/node/core/backing/src/tests/mod.rs   |  4 +-
 .../core/dispute-coordinator/src/tests.rs     |  6 +--
 polkadot/node/core/provisioner/src/lib.rs     |  6 +--
 polkadot/node/core/runtime-api/src/cache.rs   | 25 ++++-----
 polkadot/node/core/runtime-api/src/tests.rs   | 14 +++--
 .../src/pov_requester/mod.rs                  |  4 +-
 .../src/requester/tests.rs                    |  2 +-
 .../src/tests/state.rs                        |  4 +-
 .../src/collator_side/tests/mod.rs            |  5 +-
 .../dispute-distribution/src/tests/mod.rs     |  4 +-
 .../src/legacy_v1/tests.rs                    |  2 +-
 .../src/v2/statement_store.rs                 |  2 +-
 polkadot/node/service/src/chain_spec.rs       |  4 +-
 .../src/lib/approval/message_generator.rs     |  4 +-
 .../src/lib/mock/runtime_api.rs               |  2 +-
 polkadot/node/subsystem-types/src/messages.rs | 18 +++----
 .../subsystem-types/src/runtime_client.rs     | 20 +++----
 .../node/subsystem-util/src/runtime/mod.rs    | 11 ++--
 polkadot/primitives/src/lib.rs                | 37 ++++++-------
 polkadot/primitives/src/runtime_api.rs        | 14 +++--
 .../src/{v6 => v7}/async_backing.rs           |  0
 .../src/{v6 => v7}/executor_params.rs         |  0
 polkadot/primitives/src/{v6 => v7}/metrics.rs |  0
 polkadot/primitives/src/{v6 => v7}/mod.rs     | 51 +++++++++++++++++-
 polkadot/primitives/src/{v6 => v7}/signed.rs  |  0
 .../primitives/src/{v6 => v7}/slashing.rs     |  0
 polkadot/primitives/src/vstaging/mod.rs       | 53 +------------------
 polkadot/runtime/parachains/src/builder.rs    | 14 ++---
 .../runtime/parachains/src/configuration.rs   |  7 ++-
 .../src/configuration/migration/v10.rs        |  2 +-
 .../src/configuration/migration/v11.rs        |  3 +-
 polkadot/runtime/parachains/src/disputes.rs   | 10 ++--
 .../src/paras_inherent/benchmarking.rs        |  2 +-
 .../parachains/src/paras_inherent/mod.rs      |  6 +--
 .../parachains/src/runtime_api_impl/mod.rs    |  2 +-
.../src/runtime_api_impl/{v7.rs => v10.rs} | 34 +++++++++--- .../src/runtime_api_impl/vstaging.rs | 33 ++---------- polkadot/runtime/rococo/src/lib.rs | 23 ++++---- polkadot/runtime/test-runtime/src/lib.rs | 19 ++++--- polkadot/runtime/westend/src/lib.rs | 24 ++++----- 46 files changed, 240 insertions(+), 279 deletions(-) rename polkadot/primitives/src/{v6 => v7}/async_backing.rs (100%) rename polkadot/primitives/src/{v6 => v7}/executor_params.rs (100%) rename polkadot/primitives/src/{v6 => v7}/metrics.rs (100%) rename polkadot/primitives/src/{v6 => v7}/mod.rs (97%) rename polkadot/primitives/src/{v6 => v7}/signed.rs (100%) rename polkadot/primitives/src/{v6 => v7}/slashing.rs (100%) rename polkadot/runtime/parachains/src/runtime_api_impl/{v7.rs => v10.rs} (93%) diff --git a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs index 8d8a2920b4e..aa5e67e453f 100644 --- a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs +++ b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs @@ -26,9 +26,7 @@ use polkadot_core_primitives::{Block, BlockNumber, Hash, Header}; use polkadot_overseer::{ChainApiBackend, RuntimeApiSubsystemClient}; use polkadot_primitives::{ async_backing::{AsyncBackingParams, BackingState}, - slashing, - vstaging::{ApprovalVotingParams, NodeFeatures}, - CoreIndex, + slashing, ApprovalVotingParams, CoreIndex, NodeFeatures, }; use sc_authority_discovery::{AuthorityDiscovery, Error as AuthorityDiscoveryError}; use sc_client_api::AuxStore; diff --git a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs index 8cf5ccf0c70..547803865c2 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -32,13 +32,12 @@ use parity_scale_codec::{Decode, Encode}; use cumulus_primitives_core::{ relay_chain::{ async_backing::{AsyncBackingParams, BackingState}, - slashing, - vstaging::{ApprovalVotingParams, NodeFeatures}, - BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, ExecutorParams, - GroupRotationInfo, Hash as RelayHash, Header as RelayHeader, InboundHrmpMessage, - OccupiedCoreAssumption, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, - ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, + slashing, ApprovalVotingParams, BlockNumber, CandidateCommitments, CandidateEvent, + CandidateHash, CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, + ExecutorParams, GroupRotationInfo, Hash as RelayHash, Header as RelayHeader, + InboundHrmpMessage, NodeFeatures, OccupiedCoreAssumption, PvfCheckStatement, + ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, + ValidatorId, ValidatorIndex, ValidatorSignature, }, InboundDownwardMessage, ParaId, PersistedValidationData, }; diff --git a/polkadot/node/core/approval-voting/src/import.rs b/polkadot/node/core/approval-voting/src/import.rs index d34191fba31..f4be42a4845 100644 --- a/polkadot/node/core/approval-voting/src/import.rs +++ b/polkadot/node/core/approval-voting/src/import.rs @@ -45,8 +45,8 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_util::{determine_new_blocks, runtime::RuntimeInfo}; use polkadot_primitives::{ - vstaging::node_features, BlockNumber, CandidateEvent, CandidateHash, 
CandidateReceipt, - ConsensusLog, CoreIndex, GroupIndex, Hash, Header, SessionIndex, + node_features, BlockNumber, CandidateEvent, CandidateHash, CandidateReceipt, ConsensusLog, + CoreIndex, GroupIndex, Hash, Header, SessionIndex, }; use sc_keystore::LocalKeystore; use sp_consensus_slots::Slot; @@ -619,8 +619,8 @@ pub(crate) mod tests { use polkadot_node_subsystem_test_helpers::make_subsystem_context; use polkadot_node_subsystem_util::database::Database; use polkadot_primitives::{ - vstaging::{node_features::FeatureIndex, NodeFeatures}, - ExecutorParams, Id as ParaId, IndexedVec, SessionInfo, ValidatorId, ValidatorIndex, + node_features::FeatureIndex, ExecutorParams, Id as ParaId, IndexedVec, NodeFeatures, + SessionInfo, ValidatorId, ValidatorIndex, }; pub(crate) use sp_consensus_babe::{ digests::{CompatibleDigestItem, PreDigest, SecondaryVRFPreDigest}, diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs index 76b3d476e28..57e9af4a518 100644 --- a/polkadot/node/core/approval-voting/src/lib.rs +++ b/polkadot/node/core/approval-voting/src/lib.rs @@ -54,10 +54,10 @@ use polkadot_node_subsystem_util::{ TimeoutExt, }; use polkadot_primitives::{ - vstaging::{ApprovalVoteMultipleCandidates, ApprovalVotingParams}, - BlockNumber, CandidateHash, CandidateIndex, CandidateReceipt, CoreIndex, DisputeStatement, - ExecutorParams, GroupIndex, Hash, PvfExecKind, SessionIndex, SessionInfo, - ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorPair, ValidatorSignature, + ApprovalVoteMultipleCandidates, ApprovalVotingParams, BlockNumber, CandidateHash, + CandidateIndex, CandidateReceipt, CoreIndex, DisputeStatement, ExecutorParams, GroupIndex, + Hash, PvfExecKind, SessionIndex, SessionInfo, ValidDisputeStatementKind, ValidatorId, + ValidatorIndex, ValidatorPair, ValidatorSignature, }; use sc_keystore::LocalKeystore; use sp_application_crypto::Pair; diff --git a/polkadot/node/core/approval-voting/src/tests.rs b/polkadot/node/core/approval-voting/src/tests.rs index 1483af56585..f7bbbca4b8a 100644 --- a/polkadot/node/core/approval-voting/src/tests.rs +++ b/polkadot/node/core/approval-voting/src/tests.rs @@ -37,8 +37,8 @@ use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::TimeoutExt; use polkadot_overseer::HeadSupportsParachains; use polkadot_primitives::{ - vstaging::NodeFeatures, ApprovalVote, CandidateCommitments, CandidateEvent, CoreIndex, - GroupIndex, Header, Id as ParaId, IndexedVec, ValidationCode, ValidatorSignature, + ApprovalVote, CandidateCommitments, CandidateEvent, CoreIndex, GroupIndex, Header, + Id as ParaId, IndexedVec, NodeFeatures, ValidationCode, ValidatorSignature, }; use std::time::Duration; diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index b5cad4cf5f0..23acb045094 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -105,12 +105,11 @@ use polkadot_node_subsystem_util::{ Validator, }; use polkadot_primitives::{ - vstaging::{node_features::FeatureIndex, NodeFeatures}, - BackedCandidate, CandidateCommitments, CandidateHash, CandidateReceipt, - CommittedCandidateReceipt, CoreIndex, CoreState, ExecutorParams, GroupIndex, GroupRotationInfo, - Hash, Id as ParaId, IndexedVec, PersistedValidationData, PvfExecKind, SessionIndex, - SigningContext, ValidationCode, ValidatorId, ValidatorIndex, ValidatorSignature, - ValidityAttestation, + node_features::FeatureIndex, BackedCandidate, CandidateCommitments, 
CandidateHash, + CandidateReceipt, CommittedCandidateReceipt, CoreIndex, CoreState, ExecutorParams, GroupIndex, + GroupRotationInfo, Hash, Id as ParaId, IndexedVec, NodeFeatures, PersistedValidationData, + PvfExecKind, SessionIndex, SigningContext, ValidationCode, ValidatorId, ValidatorIndex, + ValidatorSignature, ValidityAttestation, }; use sp_keystore::KeystorePtr; use statement_table::{ diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index fdb47581ea3..d1969e656db 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -33,8 +33,8 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_primitives::{ - vstaging::node_features, CandidateDescriptor, GroupRotationInfo, HeadData, - PersistedValidationData, PvfExecKind, ScheduledCore, SessionIndex, LEGACY_MIN_BACKING_VOTES, + node_features, CandidateDescriptor, GroupRotationInfo, HeadData, PersistedValidationData, + PvfExecKind, ScheduledCore, SessionIndex, LEGACY_MIN_BACKING_VOTES, }; use rstest::rstest; use sp_application_crypto::AppCrypto; diff --git a/polkadot/node/core/dispute-coordinator/src/tests.rs b/polkadot/node/core/dispute-coordinator/src/tests.rs index 7c1f4ff241d..13cf2df8822 100644 --- a/polkadot/node/core/dispute-coordinator/src/tests.rs +++ b/polkadot/node/core/dispute-coordinator/src/tests.rs @@ -61,9 +61,9 @@ use polkadot_node_subsystem_test_helpers::{ make_buffered_subsystem_context, mock::new_leaf, TestSubsystemContextHandle, }; use polkadot_primitives::{ - vstaging::NodeFeatures, ApprovalVote, BlockNumber, CandidateCommitments, CandidateEvent, - CandidateHash, CandidateReceipt, CoreIndex, DisputeStatement, ExecutorParams, GroupIndex, Hash, - HeadData, Header, IndexedVec, MultiDisputeStatementSet, ScrapedOnChainVotes, SessionIndex, + ApprovalVote, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, + CandidateReceipt, CoreIndex, DisputeStatement, ExecutorParams, GroupIndex, Hash, HeadData, + Header, IndexedVec, MultiDisputeStatementSet, NodeFeatures, ScrapedOnChainVotes, SessionIndex, SessionInfo, SigningContext, ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorSignature, }; diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs index 3ccf499f325..5cfcb96dc2b 100644 --- a/polkadot/node/core/provisioner/src/lib.rs +++ b/polkadot/node/core/provisioner/src/lib.rs @@ -42,9 +42,9 @@ use polkadot_node_subsystem_util::{ TimeoutExt, }; use polkadot_primitives::{ - vstaging::{node_features::FeatureIndex, NodeFeatures}, - BackedCandidate, BlockNumber, CandidateHash, CandidateReceipt, CoreIndex, CoreState, Hash, - Id as ParaId, OccupiedCoreAssumption, SessionIndex, SignedAvailabilityBitfield, ValidatorIndex, + node_features::FeatureIndex, BackedCandidate, BlockNumber, CandidateHash, CandidateReceipt, + CoreIndex, CoreState, Hash, Id as ParaId, NodeFeatures, OccupiedCoreAssumption, SessionIndex, + SignedAvailabilityBitfield, ValidatorIndex, }; use std::collections::{BTreeMap, HashMap}; diff --git a/polkadot/node/core/runtime-api/src/cache.rs b/polkadot/node/core/runtime-api/src/cache.rs index 9674cda9838..acdb256ab36 100644 --- a/polkadot/node/core/runtime-api/src/cache.rs +++ b/polkadot/node/core/runtime-api/src/cache.rs @@ -20,14 +20,12 @@ use schnellru::{ByLength, LruMap}; use sp_consensus_babe::Epoch; use polkadot_primitives::{ - async_backing, slashing, - vstaging::{self, ApprovalVotingParams}, - 
AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, ExecutorParams, - GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, - OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, - SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, - ValidatorSignature, + async_backing, slashing, ApprovalVotingParams, AuthorityDiscoveryId, BlockNumber, + CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreIndex, + CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, Id as ParaId, + InboundDownwardMessage, InboundHrmpMessage, NodeFeatures, OccupiedCoreAssumption, + PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, + ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; /// For consistency we have the same capacity for all caches. We use 128 as we'll only need that @@ -69,7 +67,7 @@ pub(crate) struct RequestResultCache { disabled_validators: LruMap>, para_backing_state: LruMap<(Hash, ParaId), Option>, async_backing_params: LruMap, - node_features: LruMap, + node_features: LruMap, approval_voting_params: LruMap, claim_queue: LruMap>>, } @@ -454,17 +452,14 @@ impl RequestResultCache { self.minimum_backing_votes.insert(session_index, minimum_backing_votes); } - pub(crate) fn node_features( - &mut self, - session_index: SessionIndex, - ) -> Option<&vstaging::NodeFeatures> { + pub(crate) fn node_features(&mut self, session_index: SessionIndex) -> Option<&NodeFeatures> { self.node_features.get(&session_index).map(|f| &*f) } pub(crate) fn cache_node_features( &mut self, session_index: SessionIndex, - features: vstaging::NodeFeatures, + features: NodeFeatures, ) { self.node_features.insert(session_index, features); } @@ -594,6 +589,6 @@ pub(crate) enum RequestResult { DisabledValidators(Hash, Vec), ParaBackingState(Hash, ParaId, Option), AsyncBackingParams(Hash, async_backing::AsyncBackingParams), - NodeFeatures(SessionIndex, vstaging::NodeFeatures), + NodeFeatures(SessionIndex, NodeFeatures), ClaimQueue(Hash, BTreeMap>), } diff --git a/polkadot/node/core/runtime-api/src/tests.rs b/polkadot/node/core/runtime-api/src/tests.rs index b51682aa0f4..73c661c4076 100644 --- a/polkadot/node/core/runtime-api/src/tests.rs +++ b/polkadot/node/core/runtime-api/src/tests.rs @@ -20,14 +20,12 @@ use polkadot_node_primitives::{BabeAllowedSlots, BabeEpoch, BabeEpochConfigurati use polkadot_node_subsystem::SpawnGlue; use polkadot_node_subsystem_test_helpers::make_subsystem_context; use polkadot_primitives::{ - async_backing, slashing, - vstaging::{ApprovalVotingParams, NodeFeatures}, - AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, ExecutorParams, - GroupRotationInfo, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, - OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, - SessionIndex, SessionInfo, Slot, ValidationCode, ValidationCodeHash, ValidatorId, - ValidatorIndex, ValidatorSignature, + async_backing, slashing, ApprovalVotingParams, AuthorityDiscoveryId, BlockNumber, + CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreIndex, + CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Id as ParaId, + InboundDownwardMessage, 
InboundHrmpMessage, NodeFeatures, OccupiedCoreAssumption, + PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, + Slot, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; use sp_api::ApiError; use sp_core::testing::TaskExecutor; diff --git a/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs b/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs index 4e23030aa49..f99002d4188 100644 --- a/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs +++ b/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs @@ -147,9 +147,7 @@ mod tests { AllMessages, AvailabilityDistributionMessage, RuntimeApiMessage, RuntimeApiRequest, }; use polkadot_node_subsystem_test_helpers as test_helpers; - use polkadot_primitives::{ - vstaging::NodeFeatures, CandidateHash, ExecutorParams, Hash, ValidatorIndex, - }; + use polkadot_primitives::{CandidateHash, ExecutorParams, Hash, NodeFeatures, ValidatorIndex}; use test_helpers::mock::make_ferdie_keystore; use super::*; diff --git a/polkadot/node/network/availability-distribution/src/requester/tests.rs b/polkadot/node/network/availability-distribution/src/requester/tests.rs index 2f5d900b037..0dedd4f091a 100644 --- a/polkadot/node/network/availability-distribution/src/requester/tests.rs +++ b/polkadot/node/network/availability-distribution/src/requester/tests.rs @@ -25,7 +25,7 @@ use polkadot_node_primitives::{BlockData, ErasureChunk, PoV}; use polkadot_node_subsystem_test_helpers::mock::new_leaf; use polkadot_node_subsystem_util::runtime::RuntimeInfo; use polkadot_primitives::{ - vstaging::NodeFeatures, BlockNumber, CoreState, ExecutorParams, GroupIndex, Hash, Id as ParaId, + BlockNumber, CoreState, ExecutorParams, GroupIndex, Hash, Id as ParaId, NodeFeatures, ScheduledCore, SessionIndex, SessionInfo, }; use sp_core::traits::SpawnNamed; diff --git a/polkadot/node/network/availability-distribution/src/tests/state.rs b/polkadot/node/network/availability-distribution/src/tests/state.rs index 66a8d8fcdcf..1d814b4fd0e 100644 --- a/polkadot/node/network/availability-distribution/src/tests/state.rs +++ b/polkadot/node/network/availability-distribution/src/tests/state.rs @@ -47,8 +47,8 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_primitives::{ - vstaging::NodeFeatures, CandidateHash, CoreState, ExecutorParams, GroupIndex, Hash, - Id as ParaId, ScheduledCore, SessionInfo, ValidatorIndex, + CandidateHash, CoreState, ExecutorParams, GroupIndex, Hash, Id as ParaId, NodeFeatures, + ScheduledCore, SessionInfo, ValidatorIndex, }; use test_helpers::mock::{make_ferdie_keystore, new_leaf}; diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs index bcf0b34e631..de561e9f77f 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs @@ -45,9 +45,8 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::{reputation::add_reputation, TimeoutExt}; use polkadot_primitives::{ - vstaging::NodeFeatures, AuthorityDiscoveryId, CollatorPair, ExecutorParams, GroupIndex, - GroupRotationInfo, IndexedVec, ScheduledCore, SessionIndex, SessionInfo, ValidatorId, - ValidatorIndex, + AuthorityDiscoveryId, CollatorPair, 
ExecutorParams, GroupIndex, GroupRotationInfo, IndexedVec, + NodeFeatures, ScheduledCore, SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, }; use polkadot_primitives_test_helpers::TestCandidateBuilder; use test_helpers::mock::new_leaf; diff --git a/polkadot/node/network/dispute-distribution/src/tests/mod.rs b/polkadot/node/network/dispute-distribution/src/tests/mod.rs index 880d1b18032..5ad790fb01c 100644 --- a/polkadot/node/network/dispute-distribution/src/tests/mod.rs +++ b/polkadot/node/network/dispute-distribution/src/tests/mod.rs @@ -57,8 +57,8 @@ use polkadot_node_subsystem_test_helpers::{ subsystem_test_harness, TestSubsystemContextHandle, }; use polkadot_primitives::{ - vstaging::NodeFeatures, AuthorityDiscoveryId, CandidateHash, CandidateReceipt, ExecutorParams, - Hash, SessionIndex, SessionInfo, + AuthorityDiscoveryId, CandidateHash, CandidateReceipt, ExecutorParams, Hash, NodeFeatures, + SessionIndex, SessionInfo, }; use self::mock::{ diff --git a/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs b/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs index 08e9d69d8ee..7d355cc8872 100644 --- a/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs +++ b/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs @@ -43,7 +43,7 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_test_helpers::mock::{make_ferdie_keystore, new_leaf}; use polkadot_primitives::{ - vstaging::NodeFeatures, ExecutorParams, GroupIndex, Hash, HeadData, Id as ParaId, IndexedVec, + ExecutorParams, GroupIndex, Hash, HeadData, Id as ParaId, IndexedVec, NodeFeatures, SessionInfo, ValidationCode, }; use polkadot_primitives_test_helpers::{ diff --git a/polkadot/node/network/statement-distribution/src/v2/statement_store.rs b/polkadot/node/network/statement-distribution/src/v2/statement_store.rs index 022461e5551..a3b2636d2ff 100644 --- a/polkadot/node/network/statement-distribution/src/v2/statement_store.rs +++ b/polkadot/node/network/statement-distribution/src/v2/statement_store.rs @@ -292,7 +292,7 @@ impl GroupStatements { mod tests { use super::*; - use polkadot_primitives::v6::{Hash, SigningContext, ValidatorPair}; + use polkadot_primitives::v7::{Hash, SigningContext, ValidatorPair}; use sp_application_crypto::Pair as PairT; #[test] diff --git a/polkadot/node/service/src/chain_spec.rs b/polkadot/node/service/src/chain_spec.rs index c03ce1db094..1b6ba99777b 100644 --- a/polkadot/node/service/src/chain_spec.rs +++ b/polkadot/node/service/src/chain_spec.rs @@ -123,8 +123,8 @@ fn default_parachains_host_configuration( ) -> polkadot_runtime_parachains::configuration::HostConfiguration { use polkadot_primitives::{ - vstaging::{node_features::FeatureIndex, ApprovalVotingParams}, - AsyncBackingParams, MAX_CODE_SIZE, MAX_POV_SIZE, + node_features::FeatureIndex, ApprovalVotingParams, AsyncBackingParams, MAX_CODE_SIZE, + MAX_POV_SIZE, }; polkadot_runtime_parachains::configuration::HostConfiguration { diff --git a/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs b/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs index 3b08d0ed861..619a3617ca4 100644 --- a/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs @@ -41,8 +41,8 @@ use polkadot_node_primitives::approval::{ v2::{CoreBitfield, IndirectAssignmentCertV2, IndirectSignedApprovalVoteV2}, }; use polkadot_primitives::{ - vstaging::ApprovalVoteMultipleCandidates, 
CandidateEvent, CandidateHash, CandidateIndex, - CoreIndex, Hash, SessionInfo, Slot, ValidatorId, ValidatorIndex, ASSIGNMENT_KEY_TYPE_ID, + ApprovalVoteMultipleCandidates, CandidateEvent, CandidateHash, CandidateIndex, CoreIndex, Hash, + SessionInfo, Slot, ValidatorId, ValidatorIndex, ASSIGNMENT_KEY_TYPE_ID, }; use rand::{seq::SliceRandom, RngCore, SeedableRng}; use rand_chacha::ChaCha20Rng; diff --git a/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs b/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs index 3c39de870a2..b73d61321cd 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs @@ -26,7 +26,7 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_types::OverseerSignal; use polkadot_primitives::{ - vstaging::NodeFeatures, CandidateEvent, CandidateReceipt, CoreState, GroupIndex, IndexedVec, + CandidateEvent, CandidateReceipt, CoreState, GroupIndex, IndexedVec, NodeFeatures, OccupiedCore, SessionIndex, SessionInfo, ValidatorIndex, }; use sp_consensus_babe::Epoch as BabeEpoch; diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index d84b0b6dd14..2ca6728af01 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -42,16 +42,14 @@ use polkadot_node_primitives::{ ValidationResult, }; use polkadot_primitives::{ - async_backing, slashing, - vstaging::{ApprovalVotingParams, NodeFeatures}, - AuthorityDiscoveryId, BackedCandidate, BlockNumber, CandidateEvent, CandidateHash, - CandidateIndex, CandidateReceipt, CollatorId, CommittedCandidateReceipt, CoreIndex, CoreState, - DisputeState, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, HeadData, - Header as BlockHeader, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, - MultiDisputeStatementSet, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, - PvfExecKind, SessionIndex, SessionInfo, SignedAvailabilityBitfield, - SignedAvailabilityBitfields, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, - ValidatorSignature, + async_backing, slashing, ApprovalVotingParams, AuthorityDiscoveryId, BackedCandidate, + BlockNumber, CandidateEvent, CandidateHash, CandidateIndex, CandidateReceipt, CollatorId, + CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, ExecutorParams, GroupIndex, + GroupRotationInfo, Hash, HeadData, Header as BlockHeader, Id as ParaId, InboundDownwardMessage, + InboundHrmpMessage, MultiDisputeStatementSet, NodeFeatures, OccupiedCoreAssumption, + PersistedValidationData, PvfCheckStatement, PvfExecKind, SessionIndex, SessionInfo, + SignedAvailabilityBitfield, SignedAvailabilityBitfields, ValidationCode, ValidationCodeHash, + ValidatorId, ValidatorIndex, ValidatorSignature, }; use polkadot_statement_table::v2::Misbehavior; use std::{ diff --git a/polkadot/node/subsystem-types/src/runtime_client.rs b/polkadot/node/subsystem-types/src/runtime_client.rs index 7474b4120cc..664d10ed1af 100644 --- a/polkadot/node/subsystem-types/src/runtime_client.rs +++ b/polkadot/node/subsystem-types/src/runtime_client.rs @@ -16,16 +16,12 @@ use async_trait::async_trait; use polkadot_primitives::{ - async_backing, - runtime_api::ParachainHost, - slashing, - vstaging::{self, ApprovalVotingParams}, - Block, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, ExecutorParams, - GroupRotationInfo, Hash, Header, Id, 
InboundDownwardMessage, InboundHrmpMessage, - OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, - SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, - ValidatorSignature, + async_backing, runtime_api::ParachainHost, slashing, ApprovalVotingParams, Block, BlockNumber, + CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreIndex, + CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, Header, Id, + InboundDownwardMessage, InboundHrmpMessage, NodeFeatures, OccupiedCoreAssumption, + PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, + ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; use sc_client_api::{AuxStore, HeaderBackend}; use sc_transaction_pool_api::OffchainTransactionPoolFactory; @@ -324,7 +320,7 @@ pub trait RuntimeApiSubsystemClient { // === v9 === /// Get the node features. - async fn node_features(&self, at: Hash) -> Result; + async fn node_features(&self, at: Hash) -> Result; // == v10: Approval voting params == /// Approval voting configuration parameters @@ -586,7 +582,7 @@ where self.client.runtime_api().async_backing_params(at) } - async fn node_features(&self, at: Hash) -> Result { + async fn node_features(&self, at: Hash) -> Result { self.client.runtime_api().node_features(at) } diff --git a/polkadot/node/subsystem-util/src/runtime/mod.rs b/polkadot/node/subsystem-util/src/runtime/mod.rs index 481625acb32..714384b32e3 100644 --- a/polkadot/node/subsystem-util/src/runtime/mod.rs +++ b/polkadot/node/subsystem-util/src/runtime/mod.rs @@ -30,12 +30,11 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_types::UnpinHandle; use polkadot_primitives::{ - slashing, - vstaging::{node_features::FeatureIndex, NodeFeatures}, - AsyncBackingParams, CandidateEvent, CandidateHash, CoreState, EncodeAs, ExecutorParams, - GroupIndex, GroupRotationInfo, Hash, IndexedVec, OccupiedCore, ScrapedOnChainVotes, - SessionIndex, SessionInfo, Signed, SigningContext, UncheckedSigned, ValidationCode, - ValidationCodeHash, ValidatorId, ValidatorIndex, LEGACY_MIN_BACKING_VOTES, + node_features::FeatureIndex, slashing, AsyncBackingParams, CandidateEvent, CandidateHash, + CoreState, EncodeAs, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, IndexedVec, + NodeFeatures, OccupiedCore, ScrapedOnChainVotes, SessionIndex, SessionInfo, Signed, + SigningContext, UncheckedSigned, ValidationCode, ValidationCodeHash, ValidatorId, + ValidatorIndex, LEGACY_MIN_BACKING_VOTES, }; use crate::{ diff --git a/polkadot/primitives/src/lib.rs b/polkadot/primitives/src/lib.rs index 745195ce092..d4eeb3cc3d2 100644 --- a/polkadot/primitives/src/lib.rs +++ b/polkadot/primitives/src/lib.rs @@ -20,7 +20,7 @@ #![cfg_attr(not(feature = "std"), no_std)] // `v6` is currently the latest stable version of the runtime API. -pub mod v6; +pub mod v7; // The 'staging' version is special - it contains primitives which are // still in development. Once they are considered stable, they will be @@ -33,26 +33,27 @@ pub mod runtime_api; // Current primitives not requiring versioning are exported here. // Primitives requiring versioning must not be exported and must be referred by an exact version. 
-pub use v6::{ +pub use v7::{ async_backing, byzantine_threshold, check_candidate_backing, collator_signature_payload, - effective_minimum_backing_votes, executor_params, metric_definitions, slashing, + effective_minimum_backing_votes, executor_params, metric_definitions, node_features, slashing, supermajority_threshold, well_known_keys, AbridgedHostConfiguration, AbridgedHrmpChannel, - AccountId, AccountIndex, AccountPublic, ApprovalVote, AssignmentId, AsyncBackingParams, - AuthorityDiscoveryId, AvailabilityBitfield, BackedCandidate, Balance, BlakeTwo256, Block, - BlockId, BlockNumber, CandidateCommitments, CandidateDescriptor, CandidateEvent, CandidateHash, - CandidateIndex, CandidateReceipt, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, - CollatorId, CollatorSignature, CommittedCandidateReceipt, CompactStatement, ConsensusLog, - CoreIndex, CoreState, DisputeState, DisputeStatement, DisputeStatementSet, DownwardMessage, - EncodeAs, ExecutorParam, ExecutorParamError, ExecutorParams, ExecutorParamsHash, + AccountId, AccountIndex, AccountPublic, ApprovalVote, ApprovalVoteMultipleCandidates, + ApprovalVotingParams, AssignmentId, AsyncBackingParams, AuthorityDiscoveryId, + AvailabilityBitfield, BackedCandidate, Balance, BlakeTwo256, Block, BlockId, BlockNumber, + CandidateCommitments, CandidateDescriptor, CandidateEvent, CandidateHash, CandidateIndex, + CandidateReceipt, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, CollatorId, + CollatorSignature, CommittedCandidateReceipt, CompactStatement, ConsensusLog, CoreIndex, + CoreState, DisputeState, DisputeStatement, DisputeStatementSet, DownwardMessage, EncodeAs, + ExecutorParam, ExecutorParamError, ExecutorParams, ExecutorParamsHash, ExplicitDisputeStatement, GroupIndex, GroupRotationInfo, Hash, HashT, HeadData, Header, HorizontalMessages, HrmpChannelId, Id, InboundDownwardMessage, InboundHrmpMessage, IndexedVec, - InherentData, InvalidDisputeStatementKind, Moment, MultiDisputeStatementSet, Nonce, - OccupiedCore, OccupiedCoreAssumption, OutboundHrmpMessage, ParathreadClaim, ParathreadEntry, - PersistedValidationData, PvfCheckStatement, PvfExecKind, PvfPrepKind, RuntimeMetricLabel, - RuntimeMetricLabelValue, RuntimeMetricLabelValues, RuntimeMetricLabels, RuntimeMetricOp, - RuntimeMetricUpdate, ScheduledCore, ScrapedOnChainVotes, SessionIndex, SessionInfo, Signature, - Signed, SignedAvailabilityBitfield, SignedAvailabilityBitfields, SignedStatement, - SigningContext, Slot, UncheckedSigned, UncheckedSignedAvailabilityBitfield, + InherentData, InvalidDisputeStatementKind, Moment, MultiDisputeStatementSet, NodeFeatures, + Nonce, OccupiedCore, OccupiedCoreAssumption, OutboundHrmpMessage, ParathreadClaim, + ParathreadEntry, PersistedValidationData, PvfCheckStatement, PvfExecKind, PvfPrepKind, + RuntimeMetricLabel, RuntimeMetricLabelValue, RuntimeMetricLabelValues, RuntimeMetricLabels, + RuntimeMetricOp, RuntimeMetricUpdate, ScheduledCore, ScrapedOnChainVotes, SessionIndex, + SessionInfo, Signature, Signed, SignedAvailabilityBitfield, SignedAvailabilityBitfields, + SignedStatement, SigningContext, Slot, UncheckedSigned, UncheckedSignedAvailabilityBitfield, UncheckedSignedAvailabilityBitfields, UncheckedSignedStatement, UpgradeGoAhead, UpgradeRestriction, UpwardMessage, ValidDisputeStatementKind, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, @@ -63,4 +64,4 @@ pub use v6::{ }; #[cfg(feature = "std")] -pub use v6::{AssignmentPair, CollatorPair, ValidatorPair}; +pub use 
v7::{AssignmentPair, CollatorPair, ValidatorPair}; diff --git a/polkadot/primitives/src/runtime_api.rs b/polkadot/primitives/src/runtime_api.rs index 6dca33f8823..f611936f270 100644 --- a/polkadot/primitives/src/runtime_api.rs +++ b/polkadot/primitives/src/runtime_api.rs @@ -114,13 +114,11 @@ //! separated from the stable primitives. use crate::{ - async_backing, slashing, - vstaging::{self, ApprovalVotingParams}, - AsyncBackingParams, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, ExecutorParams, - GroupRotationInfo, Hash, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, - ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, - ValidatorSignature, + async_backing, slashing, ApprovalVotingParams, AsyncBackingParams, BlockNumber, + CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreIndex, + CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, NodeFeatures, + OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, + SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, ValidatorSignature, }; use polkadot_core_primitives as pcp; @@ -279,7 +277,7 @@ sp_api::decl_runtime_apis! { /// Get node features. /// This is a staging method! Do not use on production runtimes! #[api_version(9)] - fn node_features() -> vstaging::NodeFeatures; + fn node_features() -> NodeFeatures; /***** Added in v10 *****/ /// Approval voting configuration parameters diff --git a/polkadot/primitives/src/v6/async_backing.rs b/polkadot/primitives/src/v7/async_backing.rs similarity index 100% rename from polkadot/primitives/src/v6/async_backing.rs rename to polkadot/primitives/src/v7/async_backing.rs diff --git a/polkadot/primitives/src/v6/executor_params.rs b/polkadot/primitives/src/v7/executor_params.rs similarity index 100% rename from polkadot/primitives/src/v6/executor_params.rs rename to polkadot/primitives/src/v7/executor_params.rs diff --git a/polkadot/primitives/src/v6/metrics.rs b/polkadot/primitives/src/v7/metrics.rs similarity index 100% rename from polkadot/primitives/src/v6/metrics.rs rename to polkadot/primitives/src/v7/metrics.rs diff --git a/polkadot/primitives/src/v6/mod.rs b/polkadot/primitives/src/v7/mod.rs similarity index 97% rename from polkadot/primitives/src/v6/mod.rs rename to polkadot/primitives/src/v7/mod.rs index 21cee753265..d4f4a633577 100644 --- a/polkadot/primitives/src/v6/mod.rs +++ b/polkadot/primitives/src/v7/mod.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! `V6` Primitives. +//! `V7` Primitives. use bitvec::{field::BitField, slice::BitSlice, vec::BitVec}; use parity_scale_codec::{Decode, Encode}; @@ -1184,6 +1184,32 @@ impl<'a> ApprovalVoteMultipleCandidates<'a> { } } +/// Approval voting configuration parameters +#[derive( + RuntimeDebug, + Copy, + Clone, + PartialEq, + Encode, + Decode, + TypeInfo, + serde::Serialize, + serde::Deserialize, +)] +pub struct ApprovalVotingParams { + /// The maximum number of candidates `approval-voting` can vote for with + /// a single signatures. + /// + /// Setting it to 1, means we send the approval as soon as we have it available. + pub max_approval_coalesce_count: u32, +} + +impl Default for ApprovalVotingParams { + fn default() -> Self { + Self { max_approval_coalesce_count: 1 } + } +} + /// Custom validity errors used in Polkadot while validating transactions. 
 #[repr(u8)]
 pub enum ValidityError {
@@ -1947,6 +1973,29 @@ pub enum PvfExecKind {
 	Approval,
 }
 
+/// Bit indices in the `HostConfiguration.node_features` that correspond to different node features.
+pub type NodeFeatures = BitVec<u8, bitvec::order::Lsb0>;
+
+/// Module containing feature-specific bit indices into the `NodeFeatures` bitvec.
+pub mod node_features {
+	/// A feature index used to identify a bit into the node_features array stored
+	/// in the HostConfiguration.
+	#[repr(u8)]
+	pub enum FeatureIndex {
+		/// Tells if tranch0 assignments could be sent in a single certificate.
+		/// Reserved for: `<https://github.com/paritytech/polkadot-sdk/issues/628>`
+		EnableAssignmentsV2 = 0,
+		/// This feature enables the extension of `BackedCandidate::validator_indices` by 8 bits.
+		/// The value stored there represents the assumed core index where the candidates
+		/// are backed.
This is needed for the elastic scaling MVP. - ElasticScalingMVP = 1, - /// First unassigned feature bit. - /// Every time a new feature flag is assigned it should take this value. - /// and this should be incremented. - FirstUnassigned = 2, - } -} diff --git a/polkadot/runtime/parachains/src/builder.rs b/polkadot/runtime/parachains/src/builder.rs index 73617010f6d..e29c2e218ed 100644 --- a/polkadot/runtime/parachains/src/builder.rs +++ b/polkadot/runtime/parachains/src/builder.rs @@ -25,13 +25,13 @@ use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; use primitives::{ - collator_signature_payload, vstaging::node_features::FeatureIndex, AvailabilityBitfield, - BackedCandidate, CandidateCommitments, CandidateDescriptor, CandidateHash, CollatorId, - CollatorSignature, CommittedCandidateReceipt, CompactStatement, CoreIndex, DisputeStatement, - DisputeStatementSet, GroupIndex, HeadData, Id as ParaId, IndexedVec, - InherentData as ParachainsInherentData, InvalidDisputeStatementKind, PersistedValidationData, - SessionIndex, SigningContext, UncheckedSigned, ValidDisputeStatementKind, ValidationCode, - ValidatorId, ValidatorIndex, ValidityAttestation, + collator_signature_payload, node_features::FeatureIndex, AvailabilityBitfield, BackedCandidate, + CandidateCommitments, CandidateDescriptor, CandidateHash, CollatorId, CollatorSignature, + CommittedCandidateReceipt, CompactStatement, CoreIndex, DisputeStatement, DisputeStatementSet, + GroupIndex, HeadData, Id as ParaId, IndexedVec, InherentData as ParachainsInherentData, + InvalidDisputeStatementKind, PersistedValidationData, SessionIndex, SigningContext, + UncheckedSigned, ValidDisputeStatementKind, ValidationCode, ValidatorId, ValidatorIndex, + ValidityAttestation, }; use sp_core::{sr25519, H256}; use sp_runtime::{ diff --git a/polkadot/runtime/parachains/src/configuration.rs b/polkadot/runtime/parachains/src/configuration.rs index 17022272c0c..e1246fb8897 100644 --- a/polkadot/runtime/parachains/src/configuration.rs +++ b/polkadot/runtime/parachains/src/configuration.rs @@ -26,10 +26,9 @@ use polkadot_parachain_primitives::primitives::{ MAX_HORIZONTAL_MESSAGE_NUM, MAX_UPWARD_MESSAGE_NUM, }; use primitives::{ - vstaging::{ApprovalVotingParams, NodeFeatures}, - AsyncBackingParams, Balance, ExecutorParamError, ExecutorParams, SessionIndex, - LEGACY_MIN_BACKING_VOTES, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, - ON_DEMAND_MAX_QUEUE_MAX_SIZE, + ApprovalVotingParams, AsyncBackingParams, Balance, ExecutorParamError, ExecutorParams, + NodeFeatures, SessionIndex, LEGACY_MIN_BACKING_VOTES, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, + MAX_POV_SIZE, ON_DEMAND_MAX_QUEUE_MAX_SIZE, }; use sp_runtime::{traits::Zero, Perbill}; use sp_std::prelude::*; diff --git a/polkadot/runtime/parachains/src/configuration/migration/v10.rs b/polkadot/runtime/parachains/src/configuration/migration/v10.rs index cf228610e5c..3c8d6084ace 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v10.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v10.rs @@ -20,7 +20,7 @@ use crate::configuration::{Config, Pallet}; use frame_support::{pallet_prelude::*, traits::Defensive, weights::Weight}; use frame_system::pallet_prelude::BlockNumberFor; use primitives::{ - vstaging::NodeFeatures, AsyncBackingParams, Balance, ExecutorParams, SessionIndex, + AsyncBackingParams, Balance, ExecutorParams, NodeFeatures, SessionIndex, LEGACY_MIN_BACKING_VOTES, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, }; use 
sp_runtime::Perbill; diff --git a/polkadot/runtime/parachains/src/configuration/migration/v11.rs b/polkadot/runtime/parachains/src/configuration/migration/v11.rs index f6e0e043164..7ed9d086885 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v11.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v11.rs @@ -22,14 +22,13 @@ use frame_support::{ }; use frame_system::pallet_prelude::BlockNumberFor; use primitives::{ - vstaging::ApprovalVotingParams, AsyncBackingParams, ExecutorParams, SessionIndex, + ApprovalVotingParams, AsyncBackingParams, ExecutorParams, NodeFeatures, SessionIndex, LEGACY_MIN_BACKING_VOTES, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, }; use sp_std::vec::Vec; use frame_support::traits::OnRuntimeUpgrade; use polkadot_core_primitives::Balance; -use primitives::vstaging::NodeFeatures; use sp_arithmetic::Perbill; use super::v10::V10HostConfiguration; diff --git a/polkadot/runtime/parachains/src/disputes.rs b/polkadot/runtime/parachains/src/disputes.rs index cffad42e0ec..8bba97ce4bc 100644 --- a/polkadot/runtime/parachains/src/disputes.rs +++ b/polkadot/runtime/parachains/src/disputes.rs @@ -25,11 +25,11 @@ use frame_system::pallet_prelude::*; use parity_scale_codec::{Decode, Encode}; use polkadot_runtime_metrics::get_current_time; use primitives::{ - byzantine_threshold, supermajority_threshold, vstaging::ApprovalVoteMultipleCandidates, - ApprovalVote, CandidateHash, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, - CompactStatement, ConsensusLog, DisputeState, DisputeStatement, DisputeStatementSet, - ExplicitDisputeStatement, InvalidDisputeStatementKind, MultiDisputeStatementSet, SessionIndex, - SigningContext, ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorSignature, + byzantine_threshold, supermajority_threshold, ApprovalVote, ApprovalVoteMultipleCandidates, + CandidateHash, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, CompactStatement, + ConsensusLog, DisputeState, DisputeStatement, DisputeStatementSet, ExplicitDisputeStatement, + InvalidDisputeStatementKind, MultiDisputeStatementSet, SessionIndex, SigningContext, + ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorSignature, }; use scale_info::TypeInfo; use sp_runtime::{ diff --git a/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs b/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs index 1b07acffb15..8f6f2166a66 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs @@ -20,7 +20,7 @@ use frame_benchmarking::{benchmarks, impl_benchmark_test_suite}; use frame_system::RawOrigin; use sp_std::{cmp::min, collections::btree_map::BTreeMap}; -use primitives::v6::GroupIndex; +use primitives::v7::GroupIndex; use crate::builder::BenchBuilder; diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 6a20a10a8d7..31c9ab84b60 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -43,9 +43,9 @@ use frame_support::{ use frame_system::pallet_prelude::*; use pallet_babe::{self, ParentBlockRandomness}; use primitives::{ - effective_minimum_backing_votes, vstaging::node_features::FeatureIndex, BackedCandidate, - CandidateHash, CandidateReceipt, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, - CoreIndex, DisputeStatementSet, HeadData, InherentData as ParachainsInherentData, + 
effective_minimum_backing_votes, node_features::FeatureIndex, BackedCandidate, CandidateHash, + CandidateReceipt, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, CoreIndex, + DisputeStatementSet, HeadData, InherentData as ParachainsInherentData, MultiDisputeStatementSet, ScrapedOnChainVotes, SessionIndex, SignedAvailabilityBitfields, SigningContext, UncheckedSignedAvailabilityBitfield, UncheckedSignedAvailabilityBitfields, ValidatorId, ValidatorIndex, ValidityAttestation, PARACHAINS_INHERENT_IDENTIFIER, diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs b/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs index ba74e488cd3..ed2e95b3cfa 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs @@ -26,5 +26,5 @@ //! 2. Move methods from `vstaging` to `v3`. The new stable version should include all methods from //! `vstaging` tagged with the new version number (e.g. all `v3` methods). -pub mod v7; +pub mod v10; pub mod vstaging; diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs b/polkadot/runtime/parachains/src/runtime_api_impl/v10.rs similarity index 93% rename from polkadot/runtime/parachains/src/runtime_api_impl/v7.rs rename to polkadot/runtime/parachains/src/runtime_api_impl/v10.rs index 171f3f746a8..21f54121ab1 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/v10.rs @@ -14,7 +14,7 @@ //! A module exporting runtime API implementation functions for all runtime APIs using `v5` //! primitives. //! -//! Runtimes implementing the v2 runtime API are recommended to forward directly to these +//! Runtimes implementing the v10 runtime API are recommended to forward directly to these //! functions. use crate::{ @@ -29,11 +29,12 @@ use primitives::{ AsyncBackingParams, BackingState, CandidatePendingAvailability, Constraints, InboundHrmpLimitations, OutboundHrmpChannelLimitations, }, - slashing, AuthorityDiscoveryId, CandidateEvent, CandidateHash, CommittedCandidateReceipt, - CoreIndex, CoreState, DisputeState, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, - Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, OccupiedCore, OccupiedCoreAssumption, - PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, - ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, + slashing, ApprovalVotingParams, AuthorityDiscoveryId, CandidateEvent, CandidateHash, + CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, ExecutorParams, GroupIndex, + GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, + NodeFeatures, OccupiedCore, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, + ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, + ValidatorId, ValidatorIndex, ValidatorSignature, }; use sp_runtime::traits::One; use sp_std::{collections::btree_map::BTreeMap, prelude::*}; @@ -517,3 +518,24 @@ pub fn backing_state( pub fn async_backing_params() -> AsyncBackingParams { >::config().async_backing_params } + +/// Implementation for `DisabledValidators` +// CAVEAT: this should only be called on the node side +// as it might produce incorrect results on session boundaries +pub fn disabled_validators() -> Vec +where + T: shared::Config, +{ + >::disabled_validators() +} + +/// Returns the current state of the node features. 
+pub fn node_features() -> NodeFeatures { + >::config().node_features +} + +/// Approval voting subsystem configuration parameters +pub fn approval_voting_params() -> ApprovalVotingParams { + let config = >::config(); + config.approval_voting_params +} diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs index 296b872e8d4..28be3f53863 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs @@ -16,36 +16,9 @@ //! Put implementations of functions from staging APIs here. -use crate::{configuration, initializer, scheduler, shared}; -use primitives::{ - vstaging::{ApprovalVotingParams, NodeFeatures}, - CoreIndex, Id as ParaId, ValidatorIndex, -}; -use sp_std::{ - collections::{btree_map::BTreeMap, vec_deque::VecDeque}, - prelude::Vec, -}; - -/// Implementation for `DisabledValidators` -// CAVEAT: this should only be called on the node side -// as it might produce incorrect results on session boundaries -pub fn disabled_validators() -> Vec -where - T: shared::Config, -{ - >::disabled_validators() -} - -/// Returns the current state of the node features. -pub fn node_features() -> NodeFeatures { - >::config().node_features -} - -/// Approval voting subsystem configuration parameters -pub fn approval_voting_params() -> ApprovalVotingParams { - let config = >::config(); - config.approval_voting_params -} +use crate::scheduler; +use primitives::{CoreIndex, Id as ParaId}; +use sp_std::collections::{btree_map::BTreeMap, vec_deque::VecDeque}; /// Returns the claimqueue from the scheduler pub fn claim_queue() -> BTreeMap> { diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index f37c901475a..84f98a3dba1 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -23,13 +23,12 @@ use pallet_nis::WithMaximumOf; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use primitives::{ - slashing, - vstaging::{ApprovalVotingParams, NodeFeatures}, - AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, - Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, Moment, Nonce, - OccupiedCoreAssumption, PersistedValidationData, ScrapedOnChainVotes, SessionInfo, Signature, - ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, PARACHAIN_KEY_TYPE_ID, + slashing, AccountId, AccountIndex, ApprovalVotingParams, Balance, BlockNumber, CandidateEvent, + CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, + GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, Moment, + NodeFeatures, Nonce, OccupiedCoreAssumption, PersistedValidationData, ScrapedOnChainVotes, + SessionInfo, Signature, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, + PARACHAIN_KEY_TYPE_ID, }; use rococo_runtime_constants::system_parachain::BROKER_ID; use runtime_common::{ @@ -53,9 +52,7 @@ use runtime_parachains::{ inclusion::{AggregateMessageOrigin, UmpQueueId}, initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, paras_inherent as parachains_paras_inherent, - runtime_api_impl::{ - v7 as parachains_runtime_api_impl, vstaging as parachains_staging_runtime_api_impl, - }, + runtime_api_impl::v10 as parachains_runtime_api_impl, scheduler as parachains_scheduler, session_info as 
parachains_session_info, shared as parachains_shared, }; @@ -2017,15 +2014,15 @@ sp_api::impl_runtime_apis! { } fn approval_voting_params() -> ApprovalVotingParams { - parachains_staging_runtime_api_impl::approval_voting_params::() + parachains_runtime_api_impl::approval_voting_params::() } fn disabled_validators() -> Vec { - parachains_staging_runtime_api_impl::disabled_validators::() + parachains_runtime_api_impl::disabled_validators::() } fn node_features() -> NodeFeatures { - parachains_staging_runtime_api_impl::node_features::() + parachains_runtime_api_impl::node_features::() } } diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 62c3741c56d..446cd101eff 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -27,11 +27,10 @@ use sp_std::{collections::btree_map::BTreeMap, prelude::*}; use polkadot_runtime_parachains::{ assigner_parachains as parachains_assigner_parachains, configuration as parachains_configuration, disputes as parachains_disputes, - disputes::slashing as parachains_slashing, - dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion, - initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, - paras_inherent as parachains_paras_inherent, - runtime_api_impl::{v7 as runtime_impl, vstaging as staging_runtime_impl}, + disputes::slashing as parachains_slashing, dmp as parachains_dmp, hrmp as parachains_hrmp, + inclusion as parachains_inclusion, initializer as parachains_initializer, + origin as parachains_origin, paras as parachains_paras, + paras_inherent as parachains_paras_inherent, runtime_api_impl::v10 as runtime_impl, scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; @@ -971,16 +970,16 @@ sp_api::impl_runtime_apis! 
{ runtime_impl::async_backing_params::() } - fn approval_voting_params() -> primitives::vstaging::ApprovalVotingParams { - staging_runtime_impl::approval_voting_params::() + fn approval_voting_params() -> primitives::ApprovalVotingParams { + runtime_impl::approval_voting_params::() } fn disabled_validators() -> Vec { - staging_runtime_impl::disabled_validators::() + runtime_impl::disabled_validators::() } - fn node_features() -> primitives::vstaging::NodeFeatures { - staging_runtime_impl::node_features::() + fn node_features() -> primitives::NodeFeatures { + runtime_impl::node_features::() } } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index d75c1011d5f..c2651c4a447 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -45,14 +45,12 @@ use pallet_session::historical as session_historical; use pallet_transaction_payment::{CurrencyAdapter, FeeDetails, RuntimeDispatchInfo}; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use primitives::{ - slashing, - vstaging::{ApprovalVotingParams, NodeFeatures}, - AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, - Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, Moment, Nonce, - OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, - SessionInfo, Signature, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, - ValidatorSignature, PARACHAIN_KEY_TYPE_ID, + slashing, AccountId, AccountIndex, ApprovalVotingParams, Balance, BlockNumber, CandidateEvent, + CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, + GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, Moment, + NodeFeatures, Nonce, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, + ScrapedOnChainVotes, SessionInfo, Signature, ValidationCode, ValidationCodeHash, ValidatorId, + ValidatorIndex, ValidatorSignature, PARACHAIN_KEY_TYPE_ID, }; use runtime_common::{ assigned_slots, auctions, crowdloan, @@ -75,9 +73,7 @@ use runtime_parachains::{ inclusion::{AggregateMessageOrigin, UmpQueueId}, initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, paras_inherent as parachains_paras_inherent, reward_points as parachains_reward_points, - runtime_api_impl::{ - v7 as parachains_runtime_api_impl, vstaging as parachains_staging_runtime_api_impl, - }, + runtime_api_impl::v10 as parachains_runtime_api_impl, scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; @@ -2080,15 +2076,15 @@ sp_api::impl_runtime_apis! 
{ } fn approval_voting_params() -> ApprovalVotingParams { - parachains_staging_runtime_api_impl::approval_voting_params::() + parachains_runtime_api_impl::approval_voting_params::() } fn disabled_validators() -> Vec { - parachains_staging_runtime_api_impl::disabled_validators::() + parachains_runtime_api_impl::disabled_validators::() } fn node_features() -> NodeFeatures { - parachains_staging_runtime_api_impl::node_features::() + parachains_runtime_api_impl::node_features::() } } -- GitLab From 9805ba2cd01922f81621e0f3ac8adc0180fb7a49 Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Mon, 1 Apr 2024 23:18:57 +0300 Subject: [PATCH 073/128] Fix links (#3928) Fix links Related CI failure: https://github.com/paritytech/polkadot-sdk/actions/runs/8455425042/job/23162858534?pr=3859 --- .github/workflows/check-links.yml | 4 ++-- docs/sdk/src/polkadot_sdk/substrate.rs | 2 +- docs/sdk/src/polkadot_sdk/templates.rs | 5 ++--- docs/sdk/src/reference_docs/runtime_vs_smart_contract.rs | 2 +- 4 files changed, 6 insertions(+), 7 deletions(-) diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml index 903d7a3fcb3..58065f369c9 100644 --- a/.github/workflows/check-links.yml +++ b/.github/workflows/check-links.yml @@ -3,8 +3,8 @@ name: Check links on: pull_request: paths: - - "*.rs" - - "*.prdoc" + - "**.rs" + - "**.prdoc" - ".github/workflows/check-links.yml" - ".config/lychee.toml" types: [opened, synchronize, reopened, ready_for_review] diff --git a/docs/sdk/src/polkadot_sdk/substrate.rs b/docs/sdk/src/polkadot_sdk/substrate.rs index 5021c55e581..69d74d86db1 100644 --- a/docs/sdk/src/polkadot_sdk/substrate.rs +++ b/docs/sdk/src/polkadot_sdk/substrate.rs @@ -99,7 +99,7 @@ //! demonstration. //! * [`chain_spec_builder`]: Utility to build more detailed chain-specs for the aforementioned //! node. Other projects typically contain a `build-spec` subcommand that does the same. -//! * [`node_template`](https://github.com/paritytech/polkadot-sdk/tree/master/substrate/bin/node-template): +//! * [`node_template`](https://github.com/paritytech/polkadot-sdk/tree/master/substrate/bin/node): //! a template node that contains a minimal set of features and can act as a starting point of a //! project. //! * [`subkey`]: Substrate's key management utility. diff --git a/docs/sdk/src/polkadot_sdk/templates.rs b/docs/sdk/src/polkadot_sdk/templates.rs index f60c75b8f21..4bf0e839c79 100644 --- a/docs/sdk/src/polkadot_sdk/templates.rs +++ b/docs/sdk/src/polkadot_sdk/templates.rs @@ -22,9 +22,8 @@ //! - [`frontier-parachain-template`](https://github.com/paritytech/frontier-parachain-template): A //! parachain template for launching EVM-compatible parachains. //! -//! [`substrate-node-template`]: https://github.com/paritytech/polkadot-sdk/blob/master/substrate/bin/node-template/ -//! [`substrate-minimal-template`]: https://github.com/paritytech/polkadot-sdk/blob/master/substrate/bin/minimal/ -//! [`cumulus-parachain-template`]: https://github.com/paritytech/polkadot-sdk/blob/master/cumulus/parachain-template/ +//! [`minimal-template`]: https://github.com/paritytech/polkadot-sdk/blob/master/templates/minimal/ +//! 
[`parachain-template`]: https://github.com/paritytech/polkadot-sdk/blob/master/templates/parachain/ // TODO: in general, we need to make a deliberate choice here of moving a few key templates to this // repo (nothing stays in `substrate-developer-hub`) and the everything else should be community diff --git a/docs/sdk/src/reference_docs/runtime_vs_smart_contract.rs b/docs/sdk/src/reference_docs/runtime_vs_smart_contract.rs index 099512cf4ee..379b0c11b2a 100644 --- a/docs/sdk/src/reference_docs/runtime_vs_smart_contract.rs +++ b/docs/sdk/src/reference_docs/runtime_vs_smart_contract.rs @@ -117,7 +117,7 @@ //! - **Contract Code Updates**: Once deployed, although typically immutable, Smart Contracts can be //! upgraded, but lack of migration logic. The [pallet_contracts](../../../pallet_contracts/index.html) //! allows for contracts to be upgraded by exposing the `set_code` dispatchable. More details on this -//! can be found in [Ink! documentation on upgradeable contracts](https://use.ink/5.x/basics/upgradeable-contracts). +//! can be found in [Ink! documentation on upgradeable contracts](https://use.ink/basics/upgradeable-contracts). //! - **Isolated Impact**: Upgrades or changes to a smart contract generally impact only that //! contract and its users, unlike Runtime upgrades that have a network-wide effect. //! - **Simplicity and Rapid Development**: The development cycle for Smart Contracts is usually -- GitLab From bf1ca86f87f4dbdc137928e0e6b1b6a9eee1bb09 Mon Sep 17 00:00:00 2001 From: Andrei Sandu <54316454+sandreim@users.noreply.github.com> Date: Tue, 2 Apr 2024 00:36:18 +0300 Subject: [PATCH 074/128] pallet-scheduler: fix test (#3923) fix https://github.com/paritytech/polkadot-sdk/issues/3921 --------- Signed-off-by: Andrei Sandu --- substrate/frame/scheduler/src/tests.rs | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/substrate/frame/scheduler/src/tests.rs b/substrate/frame/scheduler/src/tests.rs index 44035533639..f251dde99a8 100644 --- a/substrate/frame/scheduler/src/tests.rs +++ b/substrate/frame/scheduler/src/tests.rs @@ -2285,9 +2285,18 @@ fn postponed_named_task_cannot_be_rescheduled() { // Run to a very large block. run_to_block(10); + // It was not executed. assert!(logger::log().is_empty()); - assert!(Preimage::is_requested(&hash)); + + // Preimage was not available + assert_eq!( + System::events().last().unwrap().event, + crate::Event::CallUnavailable { task: (4, 0), id: Some(name) }.into() + ); + + // So it should not be requested. + assert!(!Preimage::is_requested(&hash)); // Postponing removes the lookup. assert!(!Lookup::::contains_key(name)); @@ -2307,11 +2316,12 @@ fn postponed_named_task_cannot_be_rescheduled() { ); // Finally add the preimage. - assert_ok!(Preimage::note(call.encode().into())); + assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(0), call.encode())); + run_to_block(1000); // It did not execute. assert!(logger::log().is_empty()); - assert!(Preimage::is_requested(&hash)); + assert!(!Preimage::is_requested(&hash)); // Manually re-schedule the call by name does not work. assert_err!( -- GitLab From 52e103784945997cb3808cdfaaf72c468f8fc938 Mon Sep 17 00:00:00 2001 From: s0me0ne-unkn0wn <48632512+s0me0ne-unkn0wn@users.noreply.github.com> Date: Mon, 1 Apr 2024 23:40:38 +0200 Subject: [PATCH 075/128] `im-online` removal final cleanup (#3902) Rejoice! Rejoice! The story is nearly over. 
This PR removes stale migrations, auxiliary structures, and package dependencies, thus making Rococo and Westend totally free from any `im-online`-related stuff. `im-online` still stays a part of the Substrate node and its runtime: https://github.com/paritytech/polkadot-sdk/blob/0d9324847391e902bb42f84f0e76096b1f764efe/substrate/bin/node/runtime/src/lib.rs#L2276-L2277 I'm not sure if it makes sense to remove it from there considering that we're not removing `im-online` from FRAME. Please share your opinion. --- Cargo.lock | 4 - cumulus/test/service/Cargo.toml | 2 - polkadot/node/service/Cargo.toml | 3 - polkadot/runtime/rococo/Cargo.toml | 4 - polkadot/runtime/rococo/src/lib.rs | 137 +-------------------------- polkadot/runtime/westend/Cargo.toml | 4 - polkadot/runtime/westend/src/lib.rs | 139 +--------------------------- 7 files changed, 2 insertions(+), 291 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 81eb682a27d..02071ca38c8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4322,7 +4322,6 @@ dependencies = [ "frame-system-rpc-runtime-api", "futures", "jsonrpsee", - "pallet-im-online", "pallet-timestamp", "pallet-transaction-payment", "parachains-common", @@ -13450,7 +13449,6 @@ dependencies = [ "log", "mmr-gadget", "pallet-babe", - "pallet-im-online", "pallet-staking", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", @@ -15054,7 +15052,6 @@ dependencies = [ "pallet-elections-phragmen", "pallet-grandpa", "pallet-identity", - "pallet-im-online", "pallet-indices", "pallet-membership", "pallet-message-queue", @@ -21905,7 +21902,6 @@ dependencies = [ "pallet-fast-unstake", "pallet-grandpa", "pallet-identity", - "pallet-im-online", "pallet-indices", "pallet-membership", "pallet-message-queue", diff --git a/cumulus/test/service/Cargo.toml b/cumulus/test/service/Cargo.toml index 45e21432f5b..113e0aca68a 100644 --- a/cumulus/test/service/Cargo.toml +++ b/cumulus/test/service/Cargo.toml @@ -88,7 +88,6 @@ pallet-timestamp = { path = "../../../substrate/frame/timestamp" } futures = "0.3.28" portpicker = "0.1.1" rococo-parachain-runtime = { path = "../../parachains/runtimes/testing/rococo-parachain" } -pallet-im-online = { path = "../../../substrate/frame/im-online" } sp-consensus-grandpa = { path = "../../../substrate/primitives/consensus/grandpa" } sp-authority-discovery = { path = "../../../substrate/primitives/authority-discovery" } cumulus-test-client = { path = "../client" } @@ -106,7 +105,6 @@ runtime-benchmarks = [ "cumulus-primitives-core/runtime-benchmarks", "cumulus-test-client/runtime-benchmarks", "frame-system/runtime-benchmarks", - "pallet-im-online/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "parachains-common/runtime-benchmarks", "polkadot-cli/runtime-benchmarks", diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index 5a42443c84c..932f3e679f4 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -65,7 +65,6 @@ sp-version = { path = "../../../substrate/primitives/version" } # Substrate Pallets pallet-babe = { path = "../../../substrate/frame/babe" } -pallet-im-online = { path = "../../../substrate/frame/im-online" } pallet-staking = { path = "../../../substrate/frame/staking" } pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api" } frame-system = { path = "../../../substrate/frame/system" } @@ -197,7 +196,6 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", 
"pallet-babe/runtime-benchmarks", - "pallet-im-online/runtime-benchmarks", "pallet-staking/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", @@ -213,7 +211,6 @@ try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", "pallet-babe/try-runtime", - "pallet-im-online/try-runtime", "pallet-staking/try-runtime", "pallet-transaction-payment/try-runtime", "polkadot-runtime-parachains/try-runtime", diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml index 6f63a93cebe..ff178b17070 100644 --- a/polkadot/runtime/rococo/Cargo.toml +++ b/polkadot/runtime/rococo/Cargo.toml @@ -60,7 +60,6 @@ pallet-asset-rate = { path = "../../../substrate/frame/asset-rate", default-feat frame-executive = { path = "../../../substrate/frame/executive", default-features = false } pallet-grandpa = { path = "../../../substrate/frame/grandpa", default-features = false } pallet-identity = { path = "../../../substrate/frame/identity", default-features = false } -pallet-im-online = { path = "../../../substrate/frame/im-online", default-features = false } pallet-indices = { path = "../../../substrate/frame/indices", default-features = false } pallet-membership = { path = "../../../substrate/frame/membership", default-features = false } pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } @@ -153,7 +152,6 @@ std = [ "pallet-elections-phragmen/std", "pallet-grandpa/std", "pallet-identity/std", - "pallet-im-online/std", "pallet-indices/std", "pallet-membership/std", "pallet-message-queue/std", @@ -228,7 +226,6 @@ runtime-benchmarks = [ "pallet-elections-phragmen/runtime-benchmarks", "pallet-grandpa/runtime-benchmarks", "pallet-identity/runtime-benchmarks", - "pallet-im-online/runtime-benchmarks", "pallet-indices/runtime-benchmarks", "pallet-membership/runtime-benchmarks", "pallet-message-queue/runtime-benchmarks", @@ -284,7 +281,6 @@ try-runtime = [ "pallet-elections-phragmen/try-runtime", "pallet-grandpa/try-runtime", "pallet-identity/try-runtime", - "pallet-im-online/try-runtime", "pallet-indices/try-runtime", "pallet-membership/try-runtime", "pallet-message-queue/try-runtime", diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 84f98a3dba1..7d16d2dbf16 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -88,8 +88,7 @@ use sp_runtime::{ IdentityLookup, Keccak256, OpaqueKeys, SaturatedConversion, Verify, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, BoundToRuntimeAppPublic, FixedU128, KeyTypeId, Perbill, Percent, Permill, - RuntimeAppPublic, RuntimeDebug, + ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, Percent, Permill, RuntimeDebug, }; use sp_staking::SessionIndex; #[cfg(any(feature = "std", test))] @@ -347,46 +346,6 @@ impl pallet_authorship::Config for Runtime { type EventHandler = (); } -#[derive(Clone, Debug, PartialEq, Eq, Encode, Decode)] -pub struct OldSessionKeys { - pub grandpa: ::Public, - pub babe: ::Public, - pub im_online: pallet_im_online::sr25519::AuthorityId, - pub para_validator: ::Public, - pub para_assignment: ::Public, - pub authority_discovery: ::Public, - pub beefy: ::Public, -} - -impl OpaqueKeys for OldSessionKeys { - type KeyTypeIdProviders = (); - fn key_ids() -> &'static [KeyTypeId] { - &[ - <::Public>::ID, - <::Public>::ID, - sp_core::crypto::key_types::IM_ONLINE, - <::Public>::ID, - 
<::Public>::ID, - <::Public>::ID, - <::Public>::ID, - ] - } - fn get_raw(&self, i: KeyTypeId) -> &[u8] { - match i { - <::Public>::ID => self.grandpa.as_ref(), - <::Public>::ID => self.babe.as_ref(), - sp_core::crypto::key_types::IM_ONLINE => self.im_online.as_ref(), - <::Public>::ID => self.para_validator.as_ref(), - <::Public>::ID => - self.para_assignment.as_ref(), - <::Public>::ID => - self.authority_discovery.as_ref(), - <::Public>::ID => self.beefy.as_ref(), - _ => &[], - } - } -} - impl_opaque_keys! { pub struct SessionKeys { pub grandpa: Grandpa, @@ -398,18 +357,6 @@ impl_opaque_keys! { } } -// remove this when removing `OldSessionKeys` -fn transform_session_keys(_val: AccountId, old: OldSessionKeys) -> SessionKeys { - SessionKeys { - grandpa: old.grandpa, - babe: old.babe, - para_validator: old.para_validator, - para_assignment: old.para_assignment, - authority_discovery: old.authority_discovery, - beefy: old.beefy, - } -} - /// Special `ValidatorIdOf` implementation that is just returning the input as result. pub struct ValidatorIdOf; impl sp_runtime::traits::Convert> for ValidatorIdOf { @@ -1486,8 +1433,6 @@ pub mod migrations { use frame_support::traits::LockIdentifier; use frame_system::pallet_prelude::BlockNumberFor; - #[cfg(feature = "try-runtime")] - use sp_core::crypto::ByteArray; pub struct GetLegacyLeaseImpl; impl coretime::migration::GetLegacyLease for GetLegacyLeaseImpl { @@ -1514,7 +1459,6 @@ pub mod migrations { pub const PhragmenElectionPalletName: &'static str = "PhragmenElection"; pub const TechnicalMembershipPalletName: &'static str = "TechnicalMembership"; pub const TipsPalletName: &'static str = "Tips"; - pub const ImOnlinePalletName: &'static str = "ImOnline"; pub const PhragmenElectionPalletId: LockIdentifier = *b"phrelect"; } @@ -1551,79 +1495,6 @@ pub mod migrations { type PalletName = TipsPalletName; } - /// Upgrade Session keys to exclude `ImOnline` key. - /// When this is removed, should also remove `OldSessionKeys`. - pub struct UpgradeSessionKeys; - const UPGRADE_SESSION_KEYS_FROM_SPEC: u32 = 104000; - - impl frame_support::traits::OnRuntimeUpgrade for UpgradeSessionKeys { - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { - if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { - log::warn!(target: "runtime::session_keys", "Skipping session keys migration pre-upgrade check due to spec version (already applied?)"); - return Ok(Vec::new()); - } - - log::info!(target: "runtime::session_keys", "Collecting pre-upgrade session keys state"); - let key_ids = SessionKeys::key_ids(); - frame_support::ensure!( - key_ids.into_iter().find(|&k| *k == sp_core::crypto::key_types::IM_ONLINE) == None, - "New session keys contain the ImOnline key that should have been removed", - ); - let storage_key = pallet_session::QueuedKeys::::hashed_key(); - let mut state: Vec = Vec::new(); - frame_support::storage::unhashed::get::>( - &storage_key, - ) - .ok_or::("Queued keys are not available".into())? 
- .into_iter() - .for_each(|(id, keys)| { - state.extend_from_slice(id.as_slice()); - for key_id in key_ids { - state.extend_from_slice(keys.get_raw(*key_id)); - } - }); - frame_support::ensure!(state.len() > 0, "Queued keys are not empty before upgrade"); - Ok(state) - } - - fn on_runtime_upgrade() -> Weight { - if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { - log::info!("Skipping session keys upgrade: already applied"); - return ::DbWeight::get().reads(1); - } - log::trace!("Upgrading session keys"); - Session::upgrade_keys::(transform_session_keys); - Perbill::from_percent(50) * BlockWeights::get().max_block - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade( - old_state: sp_std::vec::Vec, - ) -> Result<(), sp_runtime::TryRuntimeError> { - if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { - log::warn!(target: "runtime::session_keys", "Skipping session keys migration post-upgrade check due to spec version (already applied?)"); - return Ok(()); - } - - let key_ids = SessionKeys::key_ids(); - let mut new_state: Vec = Vec::new(); - pallet_session::QueuedKeys::::get().into_iter().for_each(|(id, keys)| { - new_state.extend_from_slice(id.as_slice()); - for key_id in key_ids { - new_state.extend_from_slice(keys.get_raw(*key_id)); - } - }); - frame_support::ensure!(new_state.len() > 0, "Queued keys are not empty after upgrade"); - frame_support::ensure!( - old_state == new_state, - "Pre-upgrade and post-upgrade keys do not match!" - ); - log::info!(target: "runtime::session_keys", "Session keys migrated successfully"); - Ok(()) - } - } - // We don't have a limit in the Relay Chain. const IDENTITY_MIGRATION_KEY_LIMIT: u64 = u64::MAX; @@ -1657,12 +1528,6 @@ pub mod migrations { pallet_grandpa::migrations::MigrateV4ToV5, parachains_configuration::migration::v10::MigrateToV10, - // Upgrade `SessionKeys` to exclude `ImOnline` - UpgradeSessionKeys, - - // Remove `im-online` pallet on-chain storage - frame_support::migrations::RemovePallet::DbWeight>, - // Migrate Identity pallet for Usernames pallet_identity::migration::versioned::V0ToV1, parachains_configuration::migration::v11::MigrateToV11, diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml index 587a6c9a590..4c27d4f6d1f 100644 --- a/polkadot/runtime/westend/Cargo.toml +++ b/polkadot/runtime/westend/Cargo.toml @@ -64,7 +64,6 @@ pallet-election-provider-multi-phase = { path = "../../../substrate/frame/electi pallet-fast-unstake = { path = "../../../substrate/frame/fast-unstake", default-features = false } pallet-grandpa = { path = "../../../substrate/frame/grandpa", default-features = false } pallet-identity = { path = "../../../substrate/frame/identity", default-features = false } -pallet-im-online = { path = "../../../substrate/frame/im-online", default-features = false } pallet-indices = { path = "../../../substrate/frame/indices", default-features = false } pallet-membership = { path = "../../../substrate/frame/membership", default-features = false } pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } @@ -167,7 +166,6 @@ std = [ "pallet-fast-unstake/std", "pallet-grandpa/std", "pallet-identity/std", - "pallet-im-online/std", "pallet-indices/std", "pallet-membership/std", "pallet-message-queue/std", @@ -251,7 +249,6 @@ runtime-benchmarks = [ "pallet-fast-unstake/runtime-benchmarks", "pallet-grandpa/runtime-benchmarks", "pallet-identity/runtime-benchmarks", - 
"pallet-im-online/runtime-benchmarks", "pallet-indices/runtime-benchmarks", "pallet-membership/runtime-benchmarks", "pallet-message-queue/runtime-benchmarks", @@ -310,7 +307,6 @@ try-runtime = [ "pallet-fast-unstake/try-runtime", "pallet-grandpa/try-runtime", "pallet-identity/try-runtime", - "pallet-im-online/try-runtime", "pallet-indices/try-runtime", "pallet-membership/try-runtime", "pallet-message-queue/try-runtime", diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index c2651c4a447..9445e27f0e5 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -88,8 +88,7 @@ use sp_runtime::{ Keccak256, OpaqueKeys, SaturatedConversion, Verify, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, BoundToRuntimeAppPublic, FixedU128, KeyTypeId, Perbill, Percent, Permill, - RuntimeAppPublic, + ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, Percent, Permill, }; use sp_staking::SessionIndex; use sp_std::{collections::btree_map::BTreeMap, prelude::*}; @@ -405,46 +404,6 @@ parameter_types! { pub const Offset: BlockNumber = 0; } -#[derive(Clone, Debug, PartialEq, Eq, Encode, Decode)] -pub struct OldSessionKeys { - pub grandpa: ::Public, - pub babe: ::Public, - pub im_online: pallet_im_online::sr25519::AuthorityId, - pub para_validator: ::Public, - pub para_assignment: ::Public, - pub authority_discovery: ::Public, - pub beefy: ::Public, -} - -impl OpaqueKeys for OldSessionKeys { - type KeyTypeIdProviders = (); - fn key_ids() -> &'static [KeyTypeId] { - &[ - <::Public>::ID, - <::Public>::ID, - sp_core::crypto::key_types::IM_ONLINE, - <::Public>::ID, - <::Public>::ID, - <::Public>::ID, - <::Public>::ID, - ] - } - fn get_raw(&self, i: KeyTypeId) -> &[u8] { - match i { - <::Public>::ID => self.grandpa.as_ref(), - <::Public>::ID => self.babe.as_ref(), - sp_core::crypto::key_types::IM_ONLINE => self.im_online.as_ref(), - <::Public>::ID => self.para_validator.as_ref(), - <::Public>::ID => - self.para_assignment.as_ref(), - <::Public>::ID => - self.authority_discovery.as_ref(), - <::Public>::ID => self.beefy.as_ref(), - _ => &[], - } - } -} - impl_opaque_keys! { pub struct SessionKeys { pub grandpa: Grandpa, @@ -456,18 +415,6 @@ impl_opaque_keys! { } } -// remove this when removing `OldSessionKeys` -fn transform_session_keys(_v: AccountId, old: OldSessionKeys) -> SessionKeys { - SessionKeys { - grandpa: old.grandpa, - babe: old.babe, - para_validator: old.para_validator, - para_assignment: old.para_assignment, - authority_discovery: old.authority_discovery, - beefy: old.beefy, - } -} - impl pallet_session::Config for Runtime { type RuntimeEvent = RuntimeEvent; type ValidatorId = AccountId; @@ -1657,8 +1604,6 @@ pub type Migrations = migrations::Unreleased; #[allow(deprecated, missing_docs)] pub mod migrations { use super::*; - #[cfg(feature = "try-runtime")] - use sp_core::crypto::ByteArray; pub struct GetLegacyLeaseImpl; impl coretime::migration::GetLegacyLease for GetLegacyLeaseImpl { @@ -1678,83 +1623,6 @@ pub mod migrations { } } - parameter_types! { - pub const ImOnlinePalletName: &'static str = "ImOnline"; - } - - /// Upgrade Session keys to exclude `ImOnline` key. - /// When this is removed, should also remove `OldSessionKeys`. 
- pub struct UpgradeSessionKeys; - const UPGRADE_SESSION_KEYS_FROM_SPEC: u32 = 104000; - - impl frame_support::traits::OnRuntimeUpgrade for UpgradeSessionKeys { - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { - if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { - log::warn!(target: "runtime::session_keys", "Skipping session keys migration pre-upgrade check due to spec version (already applied?)"); - return Ok(Vec::new()); - } - - log::info!(target: "runtime::session_keys", "Collecting pre-upgrade session keys state"); - let key_ids = SessionKeys::key_ids(); - frame_support::ensure!( - key_ids.into_iter().find(|&k| *k == sp_core::crypto::key_types::IM_ONLINE) == None, - "New session keys contain the ImOnline key that should have been removed", - ); - let storage_key = pallet_session::QueuedKeys::::hashed_key(); - let mut state: Vec = Vec::new(); - frame_support::storage::unhashed::get::>( - &storage_key, - ) - .ok_or::("Queued keys are not available".into())? - .into_iter() - .for_each(|(id, keys)| { - state.extend_from_slice(id.as_slice()); - for key_id in key_ids { - state.extend_from_slice(keys.get_raw(*key_id)); - } - }); - frame_support::ensure!(state.len() > 0, "Queued keys are not empty before upgrade"); - Ok(state) - } - - fn on_runtime_upgrade() -> Weight { - if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { - log::warn!("Skipping session keys upgrade: already applied"); - return ::DbWeight::get().reads(1); - } - log::info!("Upgrading session keys"); - Session::upgrade_keys::(transform_session_keys); - Perbill::from_percent(50) * BlockWeights::get().max_block - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade( - old_state: sp_std::vec::Vec, - ) -> Result<(), sp_runtime::TryRuntimeError> { - if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { - log::warn!(target: "runtime::session_keys", "Skipping session keys migration post-upgrade check due to spec version (already applied?)"); - return Ok(()); - } - - let key_ids = SessionKeys::key_ids(); - let mut new_state: Vec = Vec::new(); - pallet_session::QueuedKeys::::get().into_iter().for_each(|(id, keys)| { - new_state.extend_from_slice(id.as_slice()); - for key_id in key_ids { - new_state.extend_from_slice(keys.get_raw(*key_id)); - } - }); - frame_support::ensure!(new_state.len() > 0, "Queued keys are not empty after upgrade"); - frame_support::ensure!( - old_state == new_state, - "Pre-upgrade and post-upgrade keys do not match!" - ); - log::info!(target: "runtime::session_keys", "Session keys migrated successfully"); - Ok(()) - } - } - // We don't have a limit in the Relay Chain. const IDENTITY_MIGRATION_KEY_LIMIT: u64 = u64::MAX; @@ -1771,11 +1639,6 @@ pub mod migrations { pallet_grandpa::migrations::MigrateV4ToV5, parachains_configuration::migration::v10::MigrateToV10, pallet_nomination_pools::migration::unversioned::TotalValueLockedSync, - UpgradeSessionKeys, - frame_support::migrations::RemovePallet< - ImOnlinePalletName, - ::DbWeight, - >, // Migrate Identity pallet for Usernames pallet_identity::migration::versioned::V0ToV1, parachains_configuration::migration::v11::MigrateToV11, -- GitLab From 9a62de27a98312741b4ece2fcd1c6e61b47ee3c2 Mon Sep 17 00:00:00 2001 From: Sam Johnson Date: Tue, 2 Apr 2024 01:53:51 -0400 Subject: [PATCH 076/128] Update derive syn parse 0.2.0 (+ docify) (#3920) derive-syn-parse v0.2.0 came out recently which (finally) adds support for syn 2x. 
Upgrading to this will remove many of the places where syn 1x was still compiling alongside syn 2x in the polkadot-sdk workspace. This also upgrades `docify` to 0.2.8, which is the version that upgrades derive-syn-parse to 0.2.0. Additionally, this consolidates the `docify` versions in the repo to all use the latest, and in one case upgrades to the 0.2.x syntax where 0.1.x was still being used. --------- Co-authored-by: Liam Aharon --- Cargo.lock | 95 ++++++----------- .../storage-weight-reclaim/Cargo.toml | 2 +- docs/sdk/Cargo.toml | 2 +- substrate/client/chain-spec/Cargo.toml | 2 +- substrate/frame/Cargo.toml | 2 +- substrate/frame/bags-list/Cargo.toml | 2 +- substrate/frame/balances/Cargo.toml | 2 +- .../single-block-migrations/Cargo.toml | 2 +- substrate/frame/fast-unstake/Cargo.toml | 2 +- substrate/frame/migrations/Cargo.toml | 2 +- substrate/frame/migrations/src/lib.rs | 2 +- substrate/frame/paged-list/Cargo.toml | 2 +- substrate/frame/parameters/Cargo.toml | 2 +- substrate/frame/safe-mode/Cargo.toml | 2 +- substrate/frame/scheduler/Cargo.toml | 2 +- substrate/frame/sudo/Cargo.toml | 2 +- substrate/frame/support/Cargo.toml | 2 +- substrate/frame/support/procedural/Cargo.toml | 2 +- substrate/frame/system/Cargo.toml | 2 +- substrate/frame/timestamp/Cargo.toml | 2 +- substrate/frame/treasury/Cargo.toml | 2 +- substrate/frame/tx-pause/Cargo.toml | 2 +- substrate/primitives/arithmetic/Cargo.toml | 2 +- substrate/primitives/runtime/Cargo.toml | 2 +- 24 files changed, 63 insertions(+), 78 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 02071ca38c8..88fde5ed154 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4049,7 +4049,7 @@ dependencies = [ "cumulus-primitives-core", "cumulus-primitives-proof-size-hostfunction", "cumulus-test-runtime", - "docify 0.2.7", + "docify", "frame-support", "frame-system", "log", @@ -4571,6 +4571,17 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "derive-syn-parse" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d65d7ce8132b7c0e54497a4d9a55a1c2a0912a0d786cf894472ba818fba45762" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.53", +] + [[package]] name = "derive_more" version = "0.99.17" @@ -4718,47 +4729,21 @@ checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" [[package]] name = "docify" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af1b04e6ef3d21119d3eb7b032bca17f99fe041e9c072f30f32cc0e1a2b1f3c4" -dependencies = [ - "docify_macros 0.1.16", -] - -[[package]] -name = "docify" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cc4fd38aaa9fb98ac70794c82a00360d1e165a87fbf96a8a91f9dfc602aaee2" +checksum = "43a2f138ad521dc4a2ced1a4576148a6a610b4c5923933b062a263130a6802ce" dependencies = [ - "docify_macros 0.2.7", + "docify_macros", ] [[package]] name = "docify_macros" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b5610df7f2acf89a1bb5d1a66ae56b1c7fcdcfe3948856fb3ace3f644d70eb7" -dependencies = [ - "common-path", - "derive-syn-parse", - "lazy_static", - "proc-macro2", - "quote", - "regex", - "syn 2.0.53", - "termcolor", - "walkdir", -] - -[[package]] -name = "docify_macros" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63fa215f3a0d40fb2a221b3aa90d8e1fbb8379785a990cb60d62ac71ebdc6460" +checksum =
"1a081e51fb188742f5a7a1164ad752121abcb22874b21e2c3b0dd040c515fdad" dependencies = [ "common-path", - "derive-syn-parse", + "derive-syn-parse 0.2.0", "once_cell", "proc-macro2", "quote", @@ -5454,7 +5439,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" name = "frame" version = "0.0.1-dev" dependencies = [ - "docify 0.2.7", + "docify", "frame-executive", "frame-support", "frame-system", @@ -5683,7 +5668,7 @@ dependencies = [ "array-bytes 6.1.0", "assert_matches", "bitflags 1.3.2", - "docify 0.2.7", + "docify", "environmental", "frame-metadata", "frame-support-procedural", @@ -5726,7 +5711,7 @@ version = "23.0.0" dependencies = [ "Inflector", "cfg-expr", - "derive-syn-parse", + "derive-syn-parse 0.2.0", "expander 2.0.0", "frame-support-procedural-tools", "itertools 0.10.5", @@ -5827,7 +5812,7 @@ version = "28.0.0" dependencies = [ "cfg-if", "criterion 0.4.0", - "docify 0.2.7", + "docify", "frame-support", "log", "parity-scale-codec", @@ -7944,7 +7929,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "468155613a44cfd825f1fb0ffa532b018253920d404e6fca1e8d43155198a46d" dependencies = [ "const-random", - "derive-syn-parse", + "derive-syn-parse 0.1.5", "macro_magic_core_macros", "proc-macro2", "quote", @@ -9216,7 +9201,7 @@ name = "pallet-bags-list" version = "27.0.0" dependencies = [ "aquamarine 0.5.0", - "docify 0.2.7", + "docify", "frame-benchmarking", "frame-election-provider-support", "frame-support", @@ -9264,7 +9249,7 @@ dependencies = [ name = "pallet-balances" version = "28.0.0" dependencies = [ - "docify 0.2.7", + "docify", "frame-benchmarking", "frame-support", "frame-system", @@ -9883,7 +9868,7 @@ dependencies = [ name = "pallet-example-single-block-migrations" version = "0.0.1" dependencies = [ - "docify 0.2.7", + "docify", "frame-executive", "frame-support", "frame-system", @@ -9949,7 +9934,7 @@ dependencies = [ name = "pallet-fast-unstake" version = "27.0.0" dependencies = [ - "docify 0.2.7", + "docify", "frame-benchmarking", "frame-election-provider-support", "frame-support", @@ -10150,7 +10135,7 @@ dependencies = [ name = "pallet-migrations" version = "1.0.0" dependencies = [ - "docify 0.1.16", + "docify", "frame-benchmarking", "frame-executive", "frame-support", @@ -10454,7 +10439,7 @@ dependencies = [ name = "pallet-paged-list" version = "0.6.0" dependencies = [ - "docify 0.2.7", + "docify", "frame-benchmarking", "frame-support", "frame-system", @@ -10496,7 +10481,7 @@ dependencies = [ name = "pallet-parameters" version = "0.0.1" dependencies = [ - "docify 0.2.7", + "docify", "frame-benchmarking", "frame-support", "frame-system", @@ -10657,7 +10642,7 @@ dependencies = [ name = "pallet-safe-mode" version = "9.0.0" dependencies = [ - "docify 0.2.7", + "docify", "frame-benchmarking", "frame-support", "frame-system", @@ -10714,7 +10699,7 @@ dependencies = [ name = "pallet-scheduler" version = "29.0.0" dependencies = [ - "docify 0.2.7", + "docify", "frame-benchmarking", "frame-support", "frame-system", @@ -10927,7 +10912,7 @@ dependencies = [ name = "pallet-sudo" version = "28.0.0" dependencies = [ - "docify 0.2.7", + "docify", "frame-benchmarking", "frame-support", "frame-system", @@ -10957,7 +10942,7 @@ dependencies = [ name = "pallet-timestamp" version = "27.0.0" dependencies = [ - "docify 0.2.7", + "docify", "frame-benchmarking", "frame-support", "frame-system", @@ -11061,7 +11046,7 @@ dependencies = [ name = "pallet-treasury" version = "27.0.0" dependencies = [ - "docify 0.2.7", + "docify", "frame-benchmarking", 
"frame-support", "frame-system", @@ -11081,7 +11066,7 @@ dependencies = [ name = "pallet-tx-pause" version = "9.0.0" dependencies = [ - "docify 0.2.7", + "docify", "frame-benchmarking", "frame-support", "frame-system", @@ -13370,7 +13355,7 @@ version = "0.0.1" dependencies = [ "cumulus-pallet-aura-ext", "cumulus-pallet-parachain-system", - "docify 0.2.7", + "docify", "frame", "frame-executive", "frame-support", @@ -15632,7 +15617,7 @@ name = "sc-chain-spec" version = "27.0.0" dependencies = [ "array-bytes 6.1.0", - "docify 0.2.7", + "docify", "log", "memmap2 0.9.3", "parity-scale-codec", @@ -18409,7 +18394,7 @@ name = "sp-arithmetic" version = "23.0.0" dependencies = [ "criterion 0.4.0", - "docify 0.2.7", + "docify", "integer-sqrt", "num-traits", "parity-scale-codec", @@ -18959,7 +18944,7 @@ dependencies = [ name = "sp-runtime" version = "31.0.1" dependencies = [ - "docify 0.2.7", + "docify", "either", "hash256-std-hasher", "impl-trait-for-tuples", diff --git a/cumulus/primitives/storage-weight-reclaim/Cargo.toml b/cumulus/primitives/storage-weight-reclaim/Cargo.toml index 73e0f03cd37..54eec3ffb5e 100644 --- a/cumulus/primitives/storage-weight-reclaim/Cargo.toml +++ b/cumulus/primitives/storage-weight-reclaim/Cargo.toml @@ -22,7 +22,7 @@ sp-std = { path = "../../../substrate/primitives/std", default-features = false cumulus-primitives-core = { path = "../core", default-features = false } cumulus-primitives-proof-size-hostfunction = { path = "../proof-size-hostfunction", default-features = false } -docify = "0.2.7" +docify = "0.2.8" [dev-dependencies] sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml index 3b8f45d7756..64b23866f0c 100644 --- a/docs/sdk/Cargo.toml +++ b/docs/sdk/Cargo.toml @@ -27,7 +27,7 @@ pallet-example-offchain-worker = { path = "../../substrate/frame/examples/offcha # How we build docs in rust-docs simple-mermaid = "0.1.1" -docify = "0.2.7" +docify = "0.2.8" # Polkadot SDK deps, typically all should only be in scope such that we can link to their doc item. 
node-cli = { package = "staging-node-cli", path = "../../substrate/bin/node/cli" } diff --git a/substrate/client/chain-spec/Cargo.toml b/substrate/client/chain-spec/Cargo.toml index f2138c07d71..f569b5f14a6 100644 --- a/substrate/client/chain-spec/Cargo.toml +++ b/substrate/client/chain-spec/Cargo.toml @@ -34,7 +34,7 @@ sp-runtime = { path = "../../primitives/runtime" } sp-state-machine = { path = "../../primitives/state-machine" } log = { workspace = true } array-bytes = { version = "6.1" } -docify = "0.2.7" +docify = "0.2.8" [dev-dependencies] substrate-test-runtime = { path = "../../test-utils/runtime" } diff --git a/substrate/frame/Cargo.toml b/substrate/frame/Cargo.toml index 6746723e72f..ab394592071 100644 --- a/substrate/frame/Cargo.toml +++ b/substrate/frame/Cargo.toml @@ -51,7 +51,7 @@ sp-inherents = { default-features = false, path = "../primitives/inherents", opt frame-executive = { default-features = false, path = "../frame/executive", optional = true } frame-system-rpc-runtime-api = { default-features = false, path = "../frame/system/rpc/runtime-api", optional = true } -docify = "0.2.7" +docify = "0.2.8" log = { workspace = true } [dev-dependencies] diff --git a/substrate/frame/bags-list/Cargo.toml b/substrate/frame/bags-list/Cargo.toml index f9ae462e16d..49d28482c32 100644 --- a/substrate/frame/bags-list/Cargo.toml +++ b/substrate/frame/bags-list/Cargo.toml @@ -34,7 +34,7 @@ frame-election-provider-support = { path = "../election-provider-support", defau # third party log = { workspace = true } -docify = "0.2.7" +docify = "0.2.8" aquamarine = { version = "0.5.0" } # Optional imports for benchmarking diff --git a/substrate/frame/balances/Cargo.toml b/substrate/frame/balances/Cargo.toml index 64ae90c6757..b27a5bb2478 100644 --- a/substrate/frame/balances/Cargo.toml +++ b/substrate/frame/balances/Cargo.toml @@ -24,7 +24,7 @@ frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false } sp-std = { path = "../../primitives/std", default-features = false } -docify = "0.2.6" +docify = "0.2.8" [dev-dependencies] pallet-transaction-payment = { path = "../transaction-payment" } diff --git a/substrate/frame/examples/single-block-migrations/Cargo.toml b/substrate/frame/examples/single-block-migrations/Cargo.toml index 613742a6787..1020cc9e2bb 100644 --- a/substrate/frame/examples/single-block-migrations/Cargo.toml +++ b/substrate/frame/examples/single-block-migrations/Cargo.toml @@ -13,7 +13,7 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -docify = { version = "0.2.3", default-features = false } +docify = "0.2.8" log = { version = "0.4.21", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } diff --git a/substrate/frame/fast-unstake/Cargo.toml b/substrate/frame/fast-unstake/Cargo.toml index eca8247845e..fb425dc310d 100644 --- a/substrate/frame/fast-unstake/Cargo.toml +++ b/substrate/frame/fast-unstake/Cargo.toml @@ -30,7 +30,7 @@ frame-election-provider-support = { path = "../election-provider-support", defau frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -docify = "0.2.7" +docify = "0.2.8" [dev-dependencies] pallet-staking-reward-curve = { path = "../staking/reward-curve" } diff --git 
a/substrate/frame/migrations/Cargo.toml b/substrate/frame/migrations/Cargo.toml index 40059fb9ec4..0a91d2f94c4 100644 --- a/substrate/frame/migrations/Cargo.toml +++ b/substrate/frame/migrations/Cargo.toml @@ -12,7 +12,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -docify = "0.1.14" +docify = "0.2.8" impl-trait-for-tuples = "0.2.2" log = "0.4.21" scale-info = { version = "2.0.0", default-features = false, features = ["derive"] } diff --git a/substrate/frame/migrations/src/lib.rs b/substrate/frame/migrations/src/lib.rs index cd57d89f440..30ed9c08830 100644 --- a/substrate/frame/migrations/src/lib.rs +++ b/substrate/frame/migrations/src/lib.rs @@ -35,7 +35,7 @@ //! succeeding after two steps. A runtime upgrade is then enacted and the block number is advanced //! until all migrations finish executing. Afterwards, the recorded historic migrations are //! checked and events are asserted. -#![doc = docify::embed!("substrate/frame/migrations/src/tests.rs", simple_works)] +#![doc = docify::embed!("src/tests.rs", simple_works)] //! //! ## Pallet API //! diff --git a/substrate/frame/paged-list/Cargo.toml b/substrate/frame/paged-list/Cargo.toml index 6a2af120f32..bbe8e33d484 100644 --- a/substrate/frame/paged-list/Cargo.toml +++ b/substrate/frame/paged-list/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -docify = "0.2.7" +docify = "0.2.8" scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } diff --git a/substrate/frame/parameters/Cargo.toml b/substrate/frame/parameters/Cargo.toml index 07ebeea52d5..ab93be14e6c 100644 --- a/substrate/frame/parameters/Cargo.toml +++ b/substrate/frame/parameters/Cargo.toml @@ -12,7 +12,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = scale-info = { version = "2.1.2", default-features = false, features = ["derive"] } paste = { version = "1.0.14", default-features = false } serde = { features = ["derive"], optional = true, workspace = true, default-features = true } -docify = "0.2.5" +docify = "0.2.8" frame-support = { path = "../support", default-features = false, features = ["experimental"] } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/safe-mode/Cargo.toml b/substrate/frame/safe-mode/Cargo.toml index 0c59740bef3..6ddeff263c1 100644 --- a/substrate/frame/safe-mode/Cargo.toml +++ b/substrate/frame/safe-mode/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -docify = "0.2.7" +docify = "0.2.8" frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/scheduler/Cargo.toml b/substrate/frame/scheduler/Cargo.toml index f50f6afdc06..a3e684a2083 100644 --- a/substrate/frame/scheduler/Cargo.toml +++ b/substrate/frame/scheduler/Cargo.toml @@ -23,7 +23,7 @@ sp-io = { path = "../../primitives/io", default-features = false } sp-runtime = { path = "../../primitives/runtime", 
default-features = false } sp-std = { path = "../../primitives/std", default-features = false } sp-weights = { path = "../../primitives/weights", default-features = false } -docify = "0.2.7" +docify = "0.2.8" [dev-dependencies] pallet-preimage = { path = "../preimage" } diff --git a/substrate/frame/sudo/Cargo.toml b/substrate/frame/sudo/Cargo.toml index 409104aeca1..a60324847f1 100644 --- a/substrate/frame/sudo/Cargo.toml +++ b/substrate/frame/sudo/Cargo.toml @@ -25,7 +25,7 @@ sp-io = { path = "../../primitives/io", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false } sp-std = { path = "../../primitives/std", default-features = false } -docify = "0.2.7" +docify = "0.2.8" [dev-dependencies] sp-core = { path = "../../primitives/core" } diff --git a/substrate/frame/support/Cargo.toml b/substrate/frame/support/Cargo.toml index be60068f122..3a61cfa6fac 100644 --- a/substrate/frame/support/Cargo.toml +++ b/substrate/frame/support/Cargo.toml @@ -58,7 +58,7 @@ k256 = { version = "0.13.1", default-features = false, features = ["ecdsa"] } environmental = { version = "1.1.4", default-features = false } sp-genesis-builder = { path = "../../primitives/genesis-builder", default-features = false } serde_json = { features = ["alloc"], workspace = true } -docify = "0.2.7" +docify = "0.2.8" static_assertions = "1.1.0" aquamarine = { version = "0.5.0" } diff --git a/substrate/frame/support/procedural/Cargo.toml b/substrate/frame/support/procedural/Cargo.toml index dd0688f2ad0..9f8727f7ade 100644 --- a/substrate/frame/support/procedural/Cargo.toml +++ b/substrate/frame/support/procedural/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -derive-syn-parse = "0.1.5" +derive-syn-parse = "0.2.0" Inflector = "0.11.4" cfg-expr = "0.15.5" itertools = "0.10.3" diff --git a/substrate/frame/system/Cargo.toml b/substrate/frame/system/Cargo.toml index 416969e9c47..d094c6bf984 100644 --- a/substrate/frame/system/Cargo.toml +++ b/substrate/frame/system/Cargo.toml @@ -28,7 +28,7 @@ sp-runtime = { path = "../../primitives/runtime", default-features = false, feat sp-std = { path = "../../primitives/std", default-features = false } sp-version = { path = "../../primitives/version", default-features = false, features = ["serde"] } sp-weights = { path = "../../primitives/weights", default-features = false, features = ["serde"] } -docify = "0.2.7" +docify = "0.2.8" [dev-dependencies] criterion = "0.4.0" diff --git a/substrate/frame/timestamp/Cargo.toml b/substrate/frame/timestamp/Cargo.toml index 28e57fcab0a..d8ba45a2ad2 100644 --- a/substrate/frame/timestamp/Cargo.toml +++ b/substrate/frame/timestamp/Cargo.toml @@ -30,7 +30,7 @@ sp-std = { path = "../../primitives/std", default-features = false } sp-storage = { path = "../../primitives/storage", default-features = false } sp-timestamp = { path = "../../primitives/timestamp", default-features = false } -docify = "0.2.7" +docify = "0.2.8" [dev-dependencies] sp-core = { path = "../../primitives/core" } diff --git a/substrate/frame/treasury/Cargo.toml b/substrate/frame/treasury/Cargo.toml index 5f90904123d..16bb4e92520 100644 --- a/substrate/frame/treasury/Cargo.toml +++ b/substrate/frame/treasury/Cargo.toml @@ -20,7 +20,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", "max-encoded-len", ] } -docify = "0.2.7" +docify = "0.2.8" impl-trait-for-tuples = "0.2.2" scale-info = { version = "2.10.0", default-features = false, features = 
["derive"] } serde = { features = ["derive"], optional = true, workspace = true, default-features = true } diff --git a/substrate/frame/tx-pause/Cargo.toml b/substrate/frame/tx-pause/Cargo.toml index ace2172454e..a5916c048f4 100644 --- a/substrate/frame/tx-pause/Cargo.toml +++ b/substrate/frame/tx-pause/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -docify = "0.2.7" +docify = "0.2.8" frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/primitives/arithmetic/Cargo.toml b/substrate/primitives/arithmetic/Cargo.toml index 120edd06a66..45f48d77a31 100644 --- a/substrate/primitives/arithmetic/Cargo.toml +++ b/substrate/primitives/arithmetic/Cargo.toml @@ -27,7 +27,7 @@ scale-info = { version = "2.10.0", default-features = false, features = ["derive serde = { features = ["alloc", "derive"], optional = true, workspace = true } static_assertions = "1.1.0" sp-std = { path = "../std", default-features = false } -docify = "0.2.7" +docify = "0.2.8" [dev-dependencies] criterion = "0.4.0" diff --git a/substrate/primitives/runtime/Cargo.toml b/substrate/primitives/runtime/Cargo.toml index cacfc059722..3128ebce8f7 100644 --- a/substrate/primitives/runtime/Cargo.toml +++ b/substrate/primitives/runtime/Cargo.toml @@ -32,7 +32,7 @@ sp-core = { path = "../core", default-features = false } sp-io = { path = "../io", default-features = false } sp-std = { path = "../std", default-features = false } sp-weights = { path = "../weights", default-features = false } -docify = { version = "0.2.7" } +docify = "0.2.8" simple-mermaid = { version = "0.1.1", optional = true } -- GitLab From d0ebb850ed2cefeb3e4ef8b8e0a16eb7fb6b3f3e Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Tue, 2 Apr 2024 10:57:35 +0300 Subject: [PATCH 077/128] pallet-xcm: fix weights for all XTs and deprecate unlimited weight ones (#3927) Fix "double-weights" for extrinsics, use only the ones benchmarked in the runtime. Deprecate extrinsics that don't specify WeightLimit, remove their usage across the repo. 
--------- Signed-off-by: Adrian Catangiu Co-authored-by: command-bot <> --- .../asset-hub-rococo/src/tests/teleport.rs | 143 ------------------ .../asset-hub-westend/src/tests/teleport.rs | 143 ------------------ .../bridge-hub-rococo/src/tests/snowbridge.rs | 3 +- .../src/weights/pallet_xcm.rs | 92 +++++------ .../parachains/runtimes/test-utils/src/lib.rs | 3 +- polkadot/runtime/westend/src/tests.rs | 3 +- polkadot/xcm/pallet-xcm/src/lib.rs | 131 ++-------------- .../pallet-xcm/src/tests/assets_transfer.rs | 81 +++------- polkadot/xcm/xcm-simulator/example/src/lib.rs | 3 +- prdoc/pr_3927.prdoc | 13 ++ 10 files changed, 103 insertions(+), 512 deletions(-) create mode 100644 prdoc/pr_3927.prdoc diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs index 4432999aa95..1cbb7fb8c19 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs @@ -245,16 +245,6 @@ fn relay_limited_teleport_assets(t: RelayToSystemParaTest) -> DispatchResult { ) } -fn relay_teleport_assets(t: RelayToSystemParaTest) -> DispatchResult { - ::XcmPallet::teleport_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - ) -} - fn system_para_limited_teleport_assets(t: SystemParaToRelayTest) -> DispatchResult { ::PolkadotXcm::limited_teleport_assets( t.signed_origin, @@ -266,16 +256,6 @@ fn system_para_limited_teleport_assets(t: SystemParaToRelayTest) -> DispatchResu ) } -fn system_para_teleport_assets(t: SystemParaToRelayTest) -> DispatchResult { - ::PolkadotXcm::teleport_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - ) -} - fn para_to_system_para_transfer_assets(t: ParaToSystemParaTest) -> DispatchResult { ::PolkadotXcm::transfer_assets( t.signed_origin, @@ -421,129 +401,6 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { assert_eq!(receiver_balance_after, receiver_balance_before); } -/// Teleport of native asset from Relay Chain to the System Parachain should work -#[test] -fn teleport_native_assets_from_relay_to_system_para_works() { - // Init values for Relay Chain - let amount_to_send: Balance = ROCOCO_ED * 1000; - let dest = Rococo::child_location_of(AssetHubRococo::para_id()); - let beneficiary_id = AssetHubRococoReceiver::get(); - let test_args = TestContext { - sender: RococoSender::get(), - receiver: AssetHubRococoReceiver::get(), - args: TestArgs::new_relay(dest, beneficiary_id, amount_to_send), - }; - - let mut test = RelayToSystemParaTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(relay_origin_assertions); - test.set_assertion::(para_dest_assertions); - test.set_dispatchable::(relay_teleport_assets); - test.assert(); - - let delivery_fees = Rococo::execute_with(|| { - xcm_helpers::teleport_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - // Sender's balance is reduced - 
assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // Receiver's balance is increased - assert!(receiver_balance_after > receiver_balance_before); -} - -/// Teleport of native asset from System Parachains to the Relay Chain -/// should work when there is enough balance in Relay Chain's `CheckAccount` -#[test] -fn teleport_native_assets_back_from_system_para_to_relay_works() { - // Dependency - Relay Chain's `CheckAccount` should have enough balance - teleport_native_assets_from_relay_to_system_para_works(); - - // Init values for Relay Chain - let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 1000; - let destination = AssetHubRococo::parent_location(); - let beneficiary_id = RococoReceiver::get(); - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubRococoSender::get(), - receiver: RococoReceiver::get(), - args: TestArgs::new_para(destination, beneficiary_id, amount_to_send, assets, None, 0), - }; - - let mut test = SystemParaToRelayTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(para_origin_assertions); - test.set_assertion::(relay_dest_assertions); - test.set_dispatchable::(system_para_teleport_assets); - test.assert(); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - let delivery_fees = AssetHubRococo::execute_with(|| { - xcm_helpers::teleport_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // Receiver's balance is increased - assert!(receiver_balance_after > receiver_balance_before); -} - -/// Teleport of native asset from System Parachain to Relay Chain -/// shouldn't work when there is not enough balance in Relay Chain's `CheckAccount` -#[test] -fn teleport_native_assets_from_system_para_to_relay_fails() { - // Init values for Relay Chain - let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 1000; - let destination = AssetHubRococo::parent_location(); - let beneficiary_id = RococoReceiver::get(); - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubRococoSender::get(), - receiver: RococoReceiver::get(), - args: TestArgs::new_para(destination, beneficiary_id, amount_to_send, assets, None, 0), - }; - - let mut test = SystemParaToRelayTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(para_origin_assertions); - test.set_assertion::(relay_dest_assertions_fail); - test.set_dispatchable::(system_para_teleport_assets); - test.assert(); - - let delivery_fees = AssetHubRococo::execute_with(|| { - xcm_helpers::teleport_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // Receiver's balance does not change - assert_eq!(receiver_balance_after, receiver_balance_before); -} - #[test] fn teleport_to_other_system_parachains_works() { let amount = 
ASSET_HUB_ROCOCO_ED * 100; diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs index aba05ea4322..ac518d2ed4a 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs @@ -245,16 +245,6 @@ fn relay_limited_teleport_assets(t: RelayToSystemParaTest) -> DispatchResult { ) } -fn relay_teleport_assets(t: RelayToSystemParaTest) -> DispatchResult { - ::XcmPallet::teleport_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - ) -} - fn system_para_limited_teleport_assets(t: SystemParaToRelayTest) -> DispatchResult { ::PolkadotXcm::limited_teleport_assets( t.signed_origin, @@ -266,16 +256,6 @@ fn system_para_limited_teleport_assets(t: SystemParaToRelayTest) -> DispatchResu ) } -fn system_para_teleport_assets(t: SystemParaToRelayTest) -> DispatchResult { - ::PolkadotXcm::teleport_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - ) -} - fn para_to_system_para_transfer_assets(t: ParaToSystemParaTest) -> DispatchResult { ::PolkadotXcm::transfer_assets( t.signed_origin, @@ -421,129 +401,6 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { assert_eq!(receiver_balance_after, receiver_balance_before); } -/// Teleport of native asset from Relay Chain to the System Parachain should work -#[test] -fn teleport_native_assets_from_relay_to_system_para_works() { - // Init values for Relay Chain - let amount_to_send: Balance = WESTEND_ED * 1000; - let dest = Westend::child_location_of(AssetHubWestend::para_id()); - let beneficiary_id = AssetHubWestendReceiver::get(); - let test_args = TestContext { - sender: WestendSender::get(), - receiver: AssetHubWestendReceiver::get(), - args: TestArgs::new_relay(dest, beneficiary_id, amount_to_send), - }; - - let mut test = RelayToSystemParaTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(relay_origin_assertions); - test.set_assertion::(para_dest_assertions); - test.set_dispatchable::(relay_teleport_assets); - test.assert(); - - let delivery_fees = Westend::execute_with(|| { - xcm_helpers::teleport_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // Receiver's balance is increased - assert!(receiver_balance_after > receiver_balance_before); -} - -/// Teleport of native asset from System Parachains to the Relay Chain -/// should work when there is enough balance in Relay Chain's `CheckAccount` -#[test] -fn teleport_native_assets_back_from_system_para_to_relay_works() { - // Dependency - Relay Chain's `CheckAccount` should have enough balance - teleport_native_assets_from_relay_to_system_para_works(); - - // Init values for Relay Chain - let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000; - let destination = 
AssetHubWestend::parent_location(); - let beneficiary_id = WestendReceiver::get(); - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubWestendSender::get(), - receiver: WestendReceiver::get(), - args: TestArgs::new_para(destination, beneficiary_id, amount_to_send, assets, None, 0), - }; - - let mut test = SystemParaToRelayTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(para_origin_assertions); - test.set_assertion::(relay_dest_assertions); - test.set_dispatchable::(system_para_teleport_assets); - test.assert(); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - let delivery_fees = AssetHubWestend::execute_with(|| { - xcm_helpers::teleport_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // Receiver's balance is increased - assert!(receiver_balance_after > receiver_balance_before); -} - -/// Teleport of native asset from System Parachain to Relay Chain -/// shouldn't work when there is not enough balance in Relay Chain's `CheckAccount` -#[test] -fn teleport_native_assets_from_system_para_to_relay_fails() { - // Init values for Relay Chain - let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000; - let destination = AssetHubWestend::parent_location(); - let beneficiary_id = WestendReceiver::get(); - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubWestendSender::get(), - receiver: WestendReceiver::get(), - args: TestArgs::new_para(destination, beneficiary_id, amount_to_send, assets, None, 0), - }; - - let mut test = SystemParaToRelayTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(para_origin_assertions); - test.set_assertion::(relay_dest_assertions_fail); - test.set_dispatchable::(system_para_teleport_assets); - test.assert(); - - let delivery_fees = AssetHubWestend::execute_with(|| { - xcm_helpers::teleport_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // Receiver's balance does not change - assert_eq!(receiver_balance_after, receiver_balance_before); -} - #[test] fn teleport_to_other_system_parachains_works() { let amount = ASSET_HUB_WESTEND_ED * 100; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs index caaf24e00a8..1804f9d4b67 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs @@ -458,12 +458,13 @@ fn send_weth_asset_from_asset_hub_to_ethereum() { AssetHubRococoReceiver::get(), ); // Send the Weth back to Ethereum - 
::PolkadotXcm::reserve_transfer_assets( + ::PolkadotXcm::limited_reserve_transfer_assets( RuntimeOrigin::signed(AssetHubRococoReceiver::get()), Box::new(destination), Box::new(beneficiary), Box::new(multi_assets), 0, + Unlimited, ) .unwrap(); let free_balance_after = ::Balances::free_balance( diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs index 299e4b8b3cd..a36c25f9604 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-f3xfxtob-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,8 +64,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 21_722_000 picoseconds. - Weight::from_parts(22_253_000, 0) + // Minimum execution time: 21_050_000 picoseconds. + Weight::from_parts(21_834_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -86,8 +86,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 21_694_000 picoseconds. - Weight::from_parts(22_326_000, 0) + // Minimum execution time: 21_164_000 picoseconds. + Weight::from_parts(21_656_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -112,8 +112,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 94_422_000 picoseconds. - Weight::from_parts(96_997_000, 0) + // Minimum execution time: 92_497_000 picoseconds. + Weight::from_parts(95_473_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -140,8 +140,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `367` // Estimated: `6196` - // Minimum execution time: 123_368_000 picoseconds. - Weight::from_parts(125_798_000, 0) + // Minimum execution time: 120_059_000 picoseconds. + Weight::from_parts(122_894_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(5)) @@ -170,8 +170,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `496` // Estimated: `6208` - // Minimum execution time: 142_033_000 picoseconds. - Weight::from_parts(145_702_000, 0) + // Minimum execution time: 141_977_000 picoseconds. 
+ Weight::from_parts(145_981_000, 0) .saturating_add(Weight::from_parts(0, 6208)) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(7)) @@ -180,16 +180,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_558_000 picoseconds. - Weight::from_parts(7_916_000, 0) + // Minimum execution time: 7_426_000 picoseconds. + Weight::from_parts(7_791_000, 0) .saturating_add(Weight::from_parts(0, 0)) } fn execute_blob() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_978_000 picoseconds. - Weight::from_parts(8_210_000, 0) + // Minimum execution time: 7_585_000 picoseconds. + Weight::from_parts(7_897_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) @@ -198,8 +198,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_439_000 picoseconds. - Weight::from_parts(6_711_000, 0) + // Minimum execution time: 6_224_000 picoseconds. + Weight::from_parts(6_793_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -209,8 +209,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_982_000 picoseconds. - Weight::from_parts(2_260_000, 0) + // Minimum execution time: 1_812_000 picoseconds. + Weight::from_parts(2_008_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -236,8 +236,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 27_120_000 picoseconds. - Weight::from_parts(28_048_000, 0) + // Minimum execution time: 26_586_000 picoseconds. + Weight::from_parts(27_181_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -262,8 +262,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `363` // Estimated: `3828` - // Minimum execution time: 29_354_000 picoseconds. - Weight::from_parts(30_205_000, 0) + // Minimum execution time: 28_295_000 picoseconds. + Weight::from_parts(29_280_000, 0) .saturating_add(Weight::from_parts(0, 3828)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -274,8 +274,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_926_000 picoseconds. - Weight::from_parts(2_013_000, 0) + // Minimum execution time: 1_803_000 picoseconds. + Weight::from_parts(1_876_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -285,8 +285,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `159` // Estimated: `13524` - // Minimum execution time: 18_611_000 picoseconds. - Weight::from_parts(19_120_000, 0) + // Minimum execution time: 18_946_000 picoseconds. 
+ Weight::from_parts(19_456_000, 0) .saturating_add(Weight::from_parts(0, 13524)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -297,8 +297,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `163` // Estimated: `13528` - // Minimum execution time: 18_373_000 picoseconds. - Weight::from_parts(18_945_000, 0) + // Minimum execution time: 19_080_000 picoseconds. + Weight::from_parts(19_498_000, 0) .saturating_add(Weight::from_parts(0, 13528)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -309,8 +309,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `173` // Estimated: `16013` - // Minimum execution time: 20_459_000 picoseconds. - Weight::from_parts(20_951_000, 0) + // Minimum execution time: 20_637_000 picoseconds. + Weight::from_parts(21_388_000, 0) .saturating_add(Weight::from_parts(0, 16013)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -332,8 +332,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `6152` - // Minimum execution time: 26_003_000 picoseconds. - Weight::from_parts(26_678_000, 0) + // Minimum execution time: 25_701_000 picoseconds. + Weight::from_parts(26_269_000, 0) .saturating_add(Weight::from_parts(0, 6152)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -344,8 +344,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `206` // Estimated: `11096` - // Minimum execution time: 11_557_000 picoseconds. - Weight::from_parts(11_868_000, 0) + // Minimum execution time: 11_949_000 picoseconds. + Weight::from_parts(12_249_000, 0) .saturating_add(Weight::from_parts(0, 11096)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -355,8 +355,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `170` // Estimated: `13535` - // Minimum execution time: 18_710_000 picoseconds. - Weight::from_parts(19_240_000, 0) + // Minimum execution time: 19_278_000 picoseconds. + Weight::from_parts(19_538_000, 0) .saturating_add(Weight::from_parts(0, 13535)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -379,8 +379,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `13577` - // Minimum execution time: 34_393_000 picoseconds. - Weight::from_parts(35_138_000, 0) + // Minimum execution time: 35_098_000 picoseconds. + Weight::from_parts(35_871_000, 0) .saturating_add(Weight::from_parts(0, 13577)) .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) @@ -393,8 +393,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `1588` - // Minimum execution time: 4_043_000 picoseconds. - Weight::from_parts(4_216_000, 0) + // Minimum execution time: 3_862_000 picoseconds. + Weight::from_parts(4_082_000, 0) .saturating_add(Weight::from_parts(0, 1588)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -405,8 +405,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7740` // Estimated: `11205` - // Minimum execution time: 25_410_000 picoseconds. - Weight::from_parts(26_019_000, 0) + // Minimum execution time: 25_423_000 picoseconds. 
+ Weight::from_parts(25_872_000, 0) .saturating_add(Weight::from_parts(0, 11205)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -417,8 +417,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `160` // Estimated: `3625` - // Minimum execution time: 38_850_000 picoseconds. - Weight::from_parts(39_593_000, 0) + // Minimum execution time: 37_148_000 picoseconds. + Weight::from_parts(37_709_000, 0) .saturating_add(Weight::from_parts(0, 3625)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/test-utils/src/lib.rs b/cumulus/parachains/runtimes/test-utils/src/lib.rs index e62daa16a12..3c84243306f 100644 --- a/cumulus/parachains/runtimes/test-utils/src/lib.rs +++ b/cumulus/parachains/runtimes/test-utils/src/lib.rs @@ -425,12 +425,13 @@ impl< } // do teleport - >::teleport_assets( + >::limited_teleport_assets( origin, Box::new(dest.into()), Box::new(beneficiary.into()), Box::new((AssetId(asset), amount).into()), 0, + Unlimited, ) } } diff --git a/polkadot/runtime/westend/src/tests.rs b/polkadot/runtime/westend/src/tests.rs index bdd599d2b75..4acb81e963b 100644 --- a/polkadot/runtime/westend/src/tests.rs +++ b/polkadot/runtime/westend/src/tests.rs @@ -54,11 +54,12 @@ fn sanity_check_teleport_assets_weight() { // Usually when XCM runs into an issue, it will return a weight of `Weight::MAX`, // so this test will certainly ensure that this problem does not occur. use frame_support::dispatch::GetDispatchInfo; - let weight = pallet_xcm::Call::::teleport_assets { + let weight = pallet_xcm::Call::::limited_teleport_assets { dest: Box::new(Here.into()), beneficiary: Box::new(Here.into()), assets: Box::new((Here, 200_000).into()), fee_asset_item: 0, + weight_limit: Unlimited, } .get_dispatch_info() .weight; diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index 29b61988f73..ef255068734 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -940,13 +940,12 @@ pub mod pallet { } } - #[pallet::call] + #[pallet::call(weight(::WeightInfo))] impl Pallet { /// WARNING: DEPRECATED. `send` will be removed after June 2024. Use `send_blob` instead. #[allow(deprecated)] #[deprecated(note = "`send` will be removed after June 2024. Use `send_blob` instead.")] #[pallet::call_index(0)] - #[pallet::weight(T::WeightInfo::send())] pub fn send( origin: OriginFor, dest: Box, @@ -976,23 +975,10 @@ pub mod pallet { /// - `fee_asset_item`: The index into `assets` of the item which should be used to pay /// fees. 
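To make the deprecation below concrete: the migration for callers is mechanical, the old limit-less call behaves like the `limited_*` variant invoked with `Unlimited`. A minimal standalone sketch of that calling convention, assuming simplified stand-in types (`WeightLimit`, the integer ids and amounts are illustrative, not the real pallet-xcm signatures):

```rust
// Simplified stand-ins for the real pallet-xcm types; only the shape of the
// call matters here.
#[derive(Debug, Clone)]
enum WeightLimit {
    Unlimited,
    Limited(u64),
}

// The `limited_*` variant takes an explicit weight limit for the remote-side
// fee purchase.
fn limited_teleport_assets(
    dest: u32,
    beneficiary: u32,
    amount: u128,
    fee_asset_item: u32,
    weight_limit: WeightLimit,
) {
    println!("teleport {amount} to {dest}/{beneficiary} (fee item {fee_asset_item}, limit {weight_limit:?})");
}

// The deprecated, limit-less variant behaves like the limited one called with
// `WeightLimit::Unlimited` -- which is exactly how callers migrate.
fn teleport_assets(dest: u32, beneficiary: u32, amount: u128, fee_asset_item: u32) {
    limited_teleport_assets(dest, beneficiary, amount, fee_asset_item, WeightLimit::Unlimited);
}

fn main() {
    teleport_assets(1000, 42, 1_000_000, 0);
    limited_teleport_assets(1000, 42, 1_000_000, 0, WeightLimit::Limited(5_000));
}
```

This mirrors the rewrite applied throughout the tests in this patch: keep the argument list and append a trailing weight limit.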
#[pallet::call_index(1)] - #[pallet::weight({ - let maybe_assets: Result = (*assets.clone()).try_into(); - let maybe_dest: Result = (*dest.clone()).try_into(); - match (maybe_assets, maybe_dest) { - (Ok(assets), Ok(dest)) => { - use sp_std::vec; - let count = assets.len() as u32; - let mut message = Xcm(vec![ - WithdrawAsset(assets), - SetFeesMode { jit_withdraw: true }, - InitiateTeleport { assets: Wild(AllCounted(count)), dest, xcm: Xcm(vec![]) }, - ]); - T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::teleport_assets().saturating_add(w)) - } - _ => Weight::MAX, - } - })] + #[allow(deprecated)] + #[deprecated( + note = "This extrinsic uses `WeightLimit::Unlimited`, please migrate to `limited_teleport_assets` or `transfer_assets`" + )] pub fn teleport_assets( origin: OriginFor, dest: Box, @@ -1034,23 +1020,10 @@ pub mod pallet { /// - `fee_asset_item`: The index into `assets` of the item which should be used to pay /// fees. #[pallet::call_index(2)] - #[pallet::weight({ - let maybe_assets: Result = (*assets.clone()).try_into(); - let maybe_dest: Result = (*dest.clone()).try_into(); - match (maybe_assets, maybe_dest) { - (Ok(assets), Ok(dest)) => { - use sp_std::vec; - // heaviest version of locally executed XCM program: equivalent in weight to - // transfer assets to SA, reanchor them, extend XCM program, and send onward XCM - let mut message = Xcm(vec![ - SetFeesMode { jit_withdraw: true }, - TransferReserveAsset { assets, dest, xcm: Xcm(vec![]) } - ]); - T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::reserve_transfer_assets().saturating_add(w)) - } - _ => Weight::MAX, - } - })] + #[allow(deprecated)] + #[deprecated( + note = "This extrinsic uses `WeightLimit::Unlimited`, please migrate to `limited_reserve_transfer_assets` or `transfer_assets`" + )] pub fn reserve_transfer_assets( origin: OriginFor, dest: Box, @@ -1102,7 +1075,6 @@ pub mod pallet { /// - `location`: The destination that is being described. /// - `xcm_version`: The latest version of XCM that `location` supports. #[pallet::call_index(4)] - #[pallet::weight(T::WeightInfo::force_xcm_version())] pub fn force_xcm_version( origin: OriginFor, location: Box, @@ -1121,7 +1093,6 @@ pub mod pallet { /// - `origin`: Must be an origin specified by AdminOrigin. /// - `maybe_xcm_version`: The default XCM encoding version, or `None` to disable. #[pallet::call_index(5)] - #[pallet::weight(T::WeightInfo::force_default_xcm_version())] pub fn force_default_xcm_version( origin: OriginFor, maybe_xcm_version: Option, @@ -1136,7 +1107,6 @@ pub mod pallet { /// - `origin`: Must be an origin specified by AdminOrigin. /// - `location`: The location to which we should subscribe for XCM version notifications. #[pallet::call_index(6)] - #[pallet::weight(T::WeightInfo::force_subscribe_version_notify())] pub fn force_subscribe_version_notify( origin: OriginFor, location: Box, @@ -1160,7 +1130,6 @@ pub mod pallet { /// - `location`: The location to which we are currently subscribed for XCM version /// notifications which we no longer desire. #[pallet::call_index(7)] - #[pallet::weight(T::WeightInfo::force_unsubscribe_version_notify())] pub fn force_unsubscribe_version_notify( origin: OriginFor, location: Box, @@ -1193,7 +1162,7 @@ pub mod pallet { /// /// Fee payment on the destination side is made from the asset in the `assets` vector of /// index `fee_asset_item`, up to enough to pay for `weight_limit` of weight. 
If more weight - /// is needed than `weight_limit`, then the operation will fail and the assets send may be + /// is needed than `weight_limit`, then the operation will fail and the sent assets may be /// at risk. /// /// - `origin`: Must be capable of withdrawing the `assets` and executing XCM. @@ -1208,23 +1177,7 @@ pub mod pallet { /// fees. /// - `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase. #[pallet::call_index(8)] - #[pallet::weight({ - let maybe_assets: Result = (*assets.clone()).try_into(); - let maybe_dest: Result = (*dest.clone()).try_into(); - match (maybe_assets, maybe_dest) { - (Ok(assets), Ok(dest)) => { - use sp_std::vec; - // heaviest version of locally executed XCM program: equivalent in weight to - // transfer assets to SA, reanchor them, extend XCM program, and send onward XCM - let mut message = Xcm(vec![ - SetFeesMode { jit_withdraw: true }, - TransferReserveAsset { assets, dest, xcm: Xcm(vec![]) } - ]); - T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::reserve_transfer_assets().saturating_add(w)) - } - _ => Weight::MAX, - } - })] + #[pallet::weight(T::WeightInfo::reserve_transfer_assets())] pub fn limited_reserve_transfer_assets( origin: OriginFor, dest: Box, @@ -1247,7 +1200,7 @@ pub mod pallet { /// /// Fee payment on the destination side is made from the asset in the `assets` vector of /// index `fee_asset_item`, up to enough to pay for `weight_limit` of weight. If more weight - /// is needed than `weight_limit`, then the operation will fail and the assets send may be + /// is needed than `weight_limit`, then the operation will fail and the sent assets may be /// at risk. /// /// - `origin`: Must be capable of withdrawing the `assets` and executing XCM. @@ -1262,23 +1215,7 @@ pub mod pallet { /// fees. /// - `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase. #[pallet::call_index(9)] - #[pallet::weight({ - let maybe_assets: Result = (*assets.clone()).try_into(); - let maybe_dest: Result = (*dest.clone()).try_into(); - match (maybe_assets, maybe_dest) { - (Ok(assets), Ok(dest)) => { - use sp_std::vec; - let count = assets.len() as u32; - let mut message = Xcm(vec![ - WithdrawAsset(assets), - SetFeesMode { jit_withdraw: true }, - InitiateTeleport { assets: Wild(AllCounted(count)), dest, xcm: Xcm(vec![]) }, - ]); - T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::teleport_assets().saturating_add(w)) - } - _ => Weight::MAX, - } - })] + #[pallet::weight(T::WeightInfo::teleport_assets())] pub fn limited_teleport_assets( origin: OriginFor, dest: Box, @@ -1302,7 +1239,6 @@ pub mod pallet { /// - `origin`: Must be an origin specified by AdminOrigin. /// - `suspended`: `true` to suspend, `false` to resume. #[pallet::call_index(10)] - #[pallet::weight(T::WeightInfo::force_suspension())] pub fn force_suspension(origin: OriginFor, suspended: bool) -> DispatchResult { T::AdminOrigin::ensure_origin(origin)?; XcmExecutionSuspended::::set(suspended); @@ -1315,7 +1251,7 @@ pub mod pallet { /// Fee payment on the destination side is made from the asset in the `assets` vector of /// index `fee_asset_item` (hence referred to as `fees`), up to enough to pay for /// `weight_limit` of weight. If more weight is needed than `weight_limit`, then the - /// operation will fail and the assets sent may be at risk. + /// operation will fail and the sent assets may be at risk. 
/// /// `assets` (excluding `fees`) must have same reserve location or otherwise be teleportable /// to `dest`, no limitations imposed on `fees`. @@ -1343,26 +1279,6 @@ pub mod pallet { /// fees. /// - `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase. #[pallet::call_index(11)] - #[pallet::weight({ - let maybe_assets: Result = (*assets.clone()).try_into(); - let maybe_dest: Result = (*dest.clone()).try_into(); - match (maybe_assets, maybe_dest) { - (Ok(assets), Ok(dest)) => { - use sp_std::vec; - // heaviest version of locally executed XCM program: equivalent in weight to withdrawing fees, - // burning them, transferring rest of assets to SA, reanchoring them, extending XCM program, - // and sending onward XCM - let mut message = Xcm(vec![ - SetFeesMode { jit_withdraw: true }, - WithdrawAsset(assets.clone()), - BurnAsset(assets.clone()), - TransferReserveAsset { assets, dest, xcm: Xcm(vec![]) } - ]); - T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::transfer_assets().saturating_add(w)) - } - _ => Weight::MAX, - } - })] pub fn transfer_assets( origin: OriginFor, dest: Box, @@ -1452,22 +1368,6 @@ pub mod pallet { /// was the latest when they were trapped. /// - `beneficiary`: The location/account where the claimed assets will be deposited. #[pallet::call_index(12)] - #[pallet::weight({ - let assets_version = assets.identify_version(); - let maybe_assets: Result = (*assets.clone()).try_into(); - let maybe_beneficiary: Result = (*beneficiary.clone()).try_into(); - match (maybe_assets, maybe_beneficiary) { - (Ok(assets), Ok(beneficiary)) => { - let ticket: Location = GeneralIndex(assets_version as u128).into(); - let mut message = Xcm(vec![ - ClaimAsset { assets: assets.clone(), ticket }, - DepositAsset { assets: AllCounted(assets.len() as u32).into(), beneficiary }, - ]); - T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::claim_assets().saturating_add(w)) - } - _ => Weight::MAX - } - })] pub fn claim_assets( origin: OriginFor, assets: Box, @@ -1514,7 +1414,7 @@ pub mod pallet { /// /// The message is passed in encoded. It needs to be decodable as a [`VersionedXcm`]. #[pallet::call_index(13)] - #[pallet::weight(T::WeightInfo::execute_blob())] + #[pallet::weight(max_weight.saturating_add(T::WeightInfo::execute_blob()))] pub fn execute_blob( origin: OriginFor, encoded_message: BoundedVec, @@ -1535,7 +1435,6 @@ pub mod pallet { /// /// The message is passed in encoded. It needs to be decodable as a [`VersionedXcm`]. #[pallet::call_index(14)] - #[pallet::weight(T::WeightInfo::send_blob())] pub fn send_blob( origin: OriginFor, dest: Box, diff --git a/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs index 92d23cfd281..7dc05c1cc70 100644 --- a/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs +++ b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs @@ -31,13 +31,17 @@ use sp_runtime::{traits::AccountIdConversion, DispatchError, ModuleError}; use xcm::prelude::*; use xcm_executor::traits::ConvertLocation; -// Helper function to deduplicate testing different teleport types. -fn do_test_and_verify_teleport_assets( - origin_location: Location, - expected_beneficiary: Location, - call: Call, - expected_weight_limit: WeightLimit, -) { +/// Test `limited_teleport_assets` +/// +/// Asserts that the sender's balance is decreased as a result of execution of +/// local effects. 
+#[test] +fn limited_teleport_assets_works() { + let origin_location: Location = AccountId32 { network: None, id: ALICE.into() }.into(); + let expected_beneficiary: Location = AccountId32 { network: None, id: BOB.into() }.into(); + let weight_limit = WeightLimit::Limited(Weight::from_parts(5000, 5000)); + let expected_weight_limit = weight_limit.clone(); + let balances = vec![ (ALICE, INITIAL_BALANCE), (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), @@ -47,7 +51,14 @@ fn do_test_and_verify_teleport_assets( let weight = BaseXcmWeight::get() * 2; assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); // call extrinsic - call(); + assert_ok!(XcmPallet::limited_teleport_assets( + RuntimeOrigin::signed(ALICE), + Box::new(RelayLocation::get().into()), + Box::new(expected_beneficiary.clone().into()), + Box::new((Here, SEND_AMOUNT).into()), + 0, + weight_limit, + )); assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); assert_eq!( sent_xcm(), @@ -88,57 +99,6 @@ fn do_test_and_verify_teleport_assets( }); } -/// Test `teleport_assets` -/// -/// Asserts that the sender's balance is decreased as a result of execution of -/// local effects. -#[test] -fn teleport_assets_works() { - let origin_location: Location = AccountId32 { network: None, id: ALICE.into() }.into(); - let beneficiary: Location = AccountId32 { network: None, id: BOB.into() }.into(); - do_test_and_verify_teleport_assets( - origin_location.clone(), - beneficiary.clone(), - || { - assert_ok!(XcmPallet::teleport_assets( - RuntimeOrigin::signed(ALICE), - Box::new(RelayLocation::get().into()), - Box::new(beneficiary.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - )); - }, - Unlimited, - ); -} - -/// Test `limited_teleport_assets` -/// -/// Asserts that the sender's balance is decreased as a result of execution of -/// local effects. 
-#[test] -fn limited_teleport_assets_works() { - let origin_location: Location = AccountId32 { network: None, id: ALICE.into() }.into(); - let beneficiary: Location = AccountId32 { network: None, id: BOB.into() }.into(); - let weight_limit = WeightLimit::Limited(Weight::from_parts(5000, 5000)); - let expected_weight_limit = weight_limit.clone(); - do_test_and_verify_teleport_assets( - origin_location.clone(), - beneficiary.clone(), - || { - assert_ok!(XcmPallet::limited_teleport_assets( - RuntimeOrigin::signed(ALICE), - Box::new(RelayLocation::get().into()), - Box::new(beneficiary.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - weight_limit, - )); - }, - expected_weight_limit, - ); -} - /// `limited_teleport_assets` should fail for filtered assets #[test] fn limited_teleport_filtered_assets_disallowed() { @@ -184,12 +144,13 @@ fn reserve_transfer_assets_with_paid_router_works() { let dest: Location = Junction::AccountId32 { network: None, id: user_account.clone().into() }.into(); assert_eq!(Balances::total_balance(&user_account), INITIAL_BALANCE); - assert_ok!(XcmPallet::reserve_transfer_assets( + assert_ok!(XcmPallet::limited_reserve_transfer_assets( RuntimeOrigin::signed(user_account.clone()), Box::new(Parachain(paid_para_id).into()), Box::new(dest.clone().into()), Box::new((Here, SEND_AMOUNT).into()), 0, + Unlimited, )); // XCM_FEES_NOT_WAIVED_USER_ACCOUNT spent amount diff --git a/polkadot/xcm/xcm-simulator/example/src/lib.rs b/polkadot/xcm/xcm-simulator/example/src/lib.rs index 13210179e91..56e204bf571 100644 --- a/polkadot/xcm/xcm-simulator/example/src/lib.rs +++ b/polkadot/xcm/xcm-simulator/example/src/lib.rs @@ -250,12 +250,13 @@ mod tests { let withdraw_amount = 123; Relay::execute_with(|| { - assert_ok!(RelayChainPalletXcm::reserve_transfer_assets( + assert_ok!(RelayChainPalletXcm::limited_reserve_transfer_assets( relay_chain::RuntimeOrigin::signed(ALICE), Box::new(Parachain(1).into()), Box::new(AccountId32 { network: None, id: ALICE.into() }.into()), Box::new((Here, withdraw_amount).into()), 0, + Unlimited, )); assert_eq!( relay_chain::Balances::free_balance(&child_account_id(1)), diff --git a/prdoc/pr_3927.prdoc b/prdoc/pr_3927.prdoc new file mode 100644 index 00000000000..a568636d0bd --- /dev/null +++ b/prdoc/pr_3927.prdoc @@ -0,0 +1,13 @@ +title: "pallet-xcm: deprecate transfer extrinsics without weight limit" + +doc: + - audience: Runtime Dev + description: | + pallet-xcm's extrinsics `teleport_assets` and `reserve_transfer_assets` have been + marked as deprecated. Please migrate to `limited_teleport_assets` + and `limited_reserve_transfer_assets`, respectively, or use the generic/flexible + `transfer_assets` extrinsic. + +crates: +- name: pallet-xcm + bump: minor -- GitLab From 12eb285dbe6271c365db7ba17cf643bfc77fe753 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 2 Apr 2024 11:44:23 +0200 Subject: [PATCH 078/128] Fix parachain upgrade scheduling when done by the owner/root (#3341) When using `schedule_code_upgrade` to change the code of a parachain in the relay chain runtime, we had already fixed this call to not set the `GoAhead` signal. This was done to not brick any parachain after the upgrade, because they would see the signal without having any upgrade prepared. The remaining problem is that the parachain code is only upgraded after a parachain header was enacted, i.e. after the parachain has made some progress.
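To make that concrete, here is a minimal self-contained model of the old enactment path (illustrative only, with made-up field names; not the pallet code):

```rust
// Minimal model: the upgrade is enacted only from `note_new_head`, i.e. only
// when the parachain still produces blocks.
struct Para {
    current_code: &'static str,
    // (new code, relay-chain block at which the upgrade is expected)
    future_code: Option<(&'static str, u32)>,
}

impl Para {
    // Called when a new parachain head is enacted on the relay chain.
    fn note_new_head(&mut self, relay_parent: u32) {
        if let Some((code, expected_at)) = self.future_code {
            if relay_parent >= expected_at {
                self.current_code = code;
                self.future_code = None;
            }
        }
    }
}

fn main() {
    let mut para = Para { current_code: "v1", future_code: Some(("v2", 10)) };
    // A bricked parachain produces no heads, so `note_new_head` is never
    // called and the code stays at "v1" forever -- the problem described above.
    para.note_new_head(12); // only happens if the chain still makes progress
    assert_eq!(para.current_code, "v2");
}
```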
However, this is quite complicated if the parachain is bricked (which is the most common reason to manually schedule a code upgrade). Thus, this pull request replaces `SetGoAhead` with `UpgradeStrategy` to signal to the logic which kind of strategy to use. The strategies are either `SetGoAheadSignal` or `ApplyAtExpectedBlock`. `SetGoAheadSignal` sets the `GoAhead` signal as before and awaits a parachain block. `ApplyAtExpectedBlock` schedules the upgrade and applies it directly at the `expected_block` without waiting for the parachain to make any kind of progress. --- .../runtime/common/src/paras_registrar/mod.rs | 15 +- .../runtime/parachains/src/inclusion/mod.rs | 4 +- .../runtime/parachains/src/inclusion/tests.rs | 4 +- polkadot/runtime/parachains/src/lib.rs | 4 +- .../parachains/src/paras/benchmarking.rs | 2 +- .../src/paras/benchmarking/pvf_check.rs | 2 +- polkadot/runtime/parachains/src/paras/mod.rs | 226 ++++++++++++------ .../runtime/parachains/src/paras/tests.rs | 131 ++++------ prdoc/pr_3341.prdoc | 18 ++ 9 files changed, 237 insertions(+), 169 deletions(-) create mode 100644 prdoc/pr_3341.prdoc diff --git a/polkadot/runtime/common/src/paras_registrar/mod.rs b/polkadot/runtime/common/src/paras_registrar/mod.rs index 5b2098388d8..7abe23917e4 100644 --- a/polkadot/runtime/common/src/paras_registrar/mod.rs +++ b/polkadot/runtime/common/src/paras_registrar/mod.rs @@ -29,7 +29,7 @@ use frame_system::{self, ensure_root, ensure_signed}; use primitives::{HeadData, Id as ParaId, ValidationCode, LOWEST_PUBLIC_ID, MIN_CODE_SIZE}; use runtime_parachains::{ configuration, ensure_parachain, - paras::{self, ParaGenesisArgs, SetGoAhead}, + paras::{self, ParaGenesisArgs, UpgradeStrategy}, Origin, ParaLifecycle, }; use sp_std::{prelude::*, result}; @@ -408,6 +408,13 @@ pub mod pallet { /// Schedule a parachain upgrade. /// + /// This will kick off a check of `new_code` by all validators. After the majority of the + /// validators have reported on the validity of the code, the code will either be enacted + /// or the upgrade will be rejected. If the code is enacted, the current code of the + /// parachain will be overwritten directly. This means that any PoV will be checked by this + /// new code. The parachain itself will not be informed explicitly that the validation code + /// has changed. + /// /// Can be called by Root, the parachain, or the parachain manager if the parachain is /// unlocked.
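As an aside, the pre-checking flow this doc comment describes can be pictured as a standalone vote tally. A sketch under stated assumptions: the two-thirds-plus-one threshold below is an illustration, not the exact runtime constant, and the function name is hypothetical:

```rust
// Hypothetical tally of PVF pre-check votes; `None` means the vote is still
// ongoing. The threshold is a simplified supermajority for illustration.
fn pvf_check_outcome(ayes: u32, nays: u32, validators: u32) -> Option<bool> {
    let threshold = validators * 2 / 3 + 1;
    if ayes >= threshold {
        Some(true) // enact: the parachain's current code is overwritten
    } else if nays >= threshold {
        Some(false) // reject the upgrade
    } else {
        None
    }
}

fn main() {
    assert_eq!(pvf_check_outcome(7, 0, 9), Some(true));
    assert_eq!(pvf_check_outcome(2, 7, 9), Some(false));
    assert_eq!(pvf_check_outcome(4, 3, 9), None); // still voting
}
```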
#[pallet::call_index(7)] @@ -418,7 +425,11 @@ pub mod pallet { new_code: ValidationCode, ) -> DispatchResult { Self::ensure_root_para_or_owner(origin, para)?; - runtime_parachains::schedule_code_upgrade::(para, new_code, SetGoAhead::No)?; + runtime_parachains::schedule_code_upgrade::( + para, + new_code, + UpgradeStrategy::ApplyAtExpectedBlock, + )?; Ok(()) } diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index e77f8d15b40..34afdec724a 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -22,7 +22,7 @@ use crate::{ configuration::{self, HostConfiguration}, disputes, dmp, hrmp, - paras::{self, SetGoAhead}, + paras::{self, UpgradeStrategy}, scheduler, shared::{self, AllowedRelayParentsTracker}, util::make_persisted_validation_data_with_parent, @@ -839,7 +839,7 @@ impl Pallet { new_code, now, &config, - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, )); } diff --git a/polkadot/runtime/parachains/src/inclusion/tests.rs b/polkadot/runtime/parachains/src/inclusion/tests.rs index 5ab3a13324d..97bf67ef934 100644 --- a/polkadot/runtime/parachains/src/inclusion/tests.rs +++ b/polkadot/runtime/parachains/src/inclusion/tests.rs @@ -1590,7 +1590,7 @@ fn candidate_checks() { vec![9, 8, 7, 6, 5, 4, 3, 2, 1].into(), expected_at, &cfg, - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); } @@ -2857,7 +2857,7 @@ fn para_upgrade_delay_scheduled_from_inclusion() { let cause = &active_vote_state.causes()[0]; // Upgrade block is the block of inclusion, not candidate's parent. assert_matches!(cause, - paras::PvfCheckCause::Upgrade { id, included_at, set_go_ahead: SetGoAhead::Yes } + paras::PvfCheckCause::Upgrade { id, included_at, upgrade_strategy: UpgradeStrategy::SetGoAheadSignal } if id == &chain_a && included_at == &7 ); }); diff --git a/polkadot/runtime/parachains/src/lib.rs b/polkadot/runtime/parachains/src/lib.rs index b0dc27b7286..466bc7685dd 100644 --- a/polkadot/runtime/parachains/src/lib.rs +++ b/polkadot/runtime/parachains/src/lib.rs @@ -54,7 +54,7 @@ mod mock; mod ump_tests; pub use origin::{ensure_parachain, Origin}; -pub use paras::{ParaLifecycle, SetGoAhead}; +pub use paras::{ParaLifecycle, UpgradeStrategy}; use primitives::{HeadData, Id as ParaId, ValidationCode}; use sp_runtime::{DispatchResult, FixedU128}; @@ -104,7 +104,7 @@ pub fn schedule_parachain_downgrade(id: ParaId) -> Result<(), pub fn schedule_code_upgrade( id: ParaId, new_code: ValidationCode, - set_go_ahead: SetGoAhead, + set_go_ahead: UpgradeStrategy, ) -> DispatchResult { paras::Pallet::::schedule_code_upgrade_external(id, new_code, set_go_ahead) } diff --git a/polkadot/runtime/parachains/src/paras/benchmarking.rs b/polkadot/runtime/parachains/src/paras/benchmarking.rs index 554f0c15af2..56e6ff153c1 100644 --- a/polkadot/runtime/parachains/src/paras/benchmarking.rs +++ b/polkadot/runtime/parachains/src/paras/benchmarking.rs @@ -129,7 +129,7 @@ benchmarks! 
{ ValidationCode(vec![0]), expired, &config, - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); }: _(RawOrigin::Root, para_id, new_head) verify { diff --git a/polkadot/runtime/parachains/src/paras/benchmarking/pvf_check.rs b/polkadot/runtime/parachains/src/paras/benchmarking/pvf_check.rs index 53ccc35c477..0bf5aa86c40 100644 --- a/polkadot/runtime/parachains/src/paras/benchmarking/pvf_check.rs +++ b/polkadot/runtime/parachains/src/paras/benchmarking/pvf_check.rs @@ -177,7 +177,7 @@ where validation_code, /* relay_parent_number */ 1u32.into(), &configuration::Pallet::::config(), - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); } else { let r = Pallet::::schedule_para_initialize( diff --git a/polkadot/runtime/parachains/src/paras/mod.rs b/polkadot/runtime/parachains/src/paras/mod.rs index 017cd87f13b..3eb66112fed 100644 --- a/polkadot/runtime/parachains/src/paras/mod.rs +++ b/polkadot/runtime/parachains/src/paras/mod.rs @@ -386,16 +386,32 @@ pub(crate) enum PvfCheckCause { /// /// See https://github.com/paritytech/polkadot/issues/4601 for detailed explanation. included_at: BlockNumber, - /// Whether or not the given para should be sent the `GoAhead` signal. - set_go_ahead: SetGoAhead, + /// Whether or not the upgrade should be enacted directly. + /// + /// If set to `ApplyAtExpectedBlock`, no `GoAhead` signal will be set and the parachain code + /// will also be overwritten directly. + upgrade_strategy: UpgradeStrategy, }, } -/// Should the `GoAhead` signal be set after a successful check of the new wasm binary? +/// The strategy on how to handle a validation code upgrade. +/// +/// When scheduling a parachain code upgrade, the upgrade is first checked by all validators. The +/// validators ensure that the new validation code can be compiled and instantiated. After the +/// majority of the validators have reported their checking result, the upgrade is either scheduled +/// or aborted. This strategy then comes into play around the relay chain block this upgrade was +/// scheduled in. #[derive(Debug, Copy, Clone, PartialEq, TypeInfo, Decode, Encode)] -pub enum SetGoAhead { - Yes, - No, +pub enum UpgradeStrategy { + /// Set the `GoAhead` signal to inform the parachain that it is time to upgrade. + /// + /// The upgrade will then be applied after the first parachain block that observed the + /// `GoAhead` signal was enacted. + SetGoAheadSignal, + /// Apply the upgrade directly at the expected relay chain block. + /// + /// This doesn't wait for the parachain to make any kind of progress. + ApplyAtExpectedBlock, } impl PvfCheckCause { @@ -758,7 +774,8 @@ pub mod pallet { pub(super) type PastCodePruning = StorageValue<_, Vec<(ParaId, BlockNumberFor)>, ValueQuery>; - /// The block number at which the planned code change is expected for a para. + /// The block number at which the planned code change is expected for a parachain. + /// /// The change will be applied after the first parablock for this ID included which executes /// in the context of a relay chain block with a number >= `expected_at`. #[pallet::storage] pub(super) type FutureCodeUpgrades = StorageMap<_, Twox64Concat, ParaId, BlockNumberFor>; + /// The list of upcoming future code upgrades. + /// + /// Each item is a pair of the parachain and the expected block at which the upgrade should be + /// applied. The upgrade will be applied at the given relay chain block.
In contrast to + /// [`FutureCodeUpgrades`] this code upgrade will be applied regardless of the parachain making any + /// progress or not. + /// + /// Ordered ascending by block number. + #[pallet::storage] + pub(super) type FutureCodeUpgradesAt = + StorageValue<_, Vec<(ParaId, BlockNumberFor)>, ValueQuery>; /// The actual future code hash of a para. /// /// Corresponding code can be retrieved with [`CodeByHash`]. @@ -809,8 +838,10 @@ pub(super) type UpgradeCooldowns = StorageValue<_, Vec<(ParaId, BlockNumberFor)>, ValueQuery>; - /// The list of upcoming code upgrades. Each item is a pair of which para performs a code - /// upgrade and at which relay-chain block it is expected at. + /// The list of upcoming code upgrades. + /// + /// Each item is a pair of which para performs a code upgrade and the relay-chain block at + /// which it is expected. /// /// Ordered ascending by block number. #[pallet::storage] @@ -880,21 +911,9 @@ new_code: ValidationCode, ) -> DispatchResult { ensure_root(origin)?; - let maybe_prior_code_hash = CurrentCodeHash::::get(&para); let new_code_hash = new_code.hash(); Self::increase_code_ref(&new_code_hash, &new_code); - CurrentCodeHash::::insert(&para, new_code_hash); - - let now = frame_system::Pallet::::block_number(); - if let Some(prior_code_hash) = maybe_prior_code_hash { - Self::note_past_code(para, now, now, prior_code_hash); - } else { - log::error!( - target: LOG_TARGET, - "Pallet paras storage is inconsistent, prior code not found {:?}", - &para - ); - } + Self::set_current_code(para, new_code_hash, frame_system::Pallet::::block_number()); Self::deposit_event(Event::CurrentCodeUpdated(para)); Ok(()) } @@ -928,7 +947,7 @@ new_code, relay_parent_number, &config, - SetGoAhead::No, + UpgradeStrategy::ApplyAtExpectedBlock, ); Self::deposit_event(Event::CodeUpgradeScheduled(para)); Ok(()) } @@ -1227,7 +1246,7 @@ impl Pallet { pub(crate) fn schedule_code_upgrade_external( id: ParaId, new_code: ValidationCode, - set_go_ahead: SetGoAhead, + upgrade_strategy: UpgradeStrategy, ) -> DispatchResult { // Check that we can schedule an upgrade at all. ensure!(Self::can_upgrade_validation_code(id), Error::::CannotUpgradeCode); let config = configuration::Pallet::::config(); let current_block = frame_system::Pallet::::block_number(); // Schedule the upgrade with a delay just like if a parachain triggered the upgrade. let upgrade_block = current_block.saturating_add(config.validation_upgrade_delay); - Self::schedule_code_upgrade(id, new_code, upgrade_block, &config, set_go_ahead); + Self::schedule_code_upgrade(id, new_code, upgrade_block, &config, upgrade_strategy); Self::deposit_event(Event::CodeUpgradeScheduled(id)); Ok(()) } @@ -1252,8 +1271,9 @@ impl Pallet { /// Called by the initializer to initialize the paras pallet. pub(crate) fn initializer_initialize(now: BlockNumberFor) -> Weight { - let weight = Self::prune_old_code(now); - weight + Self::process_scheduled_upgrade_changes(now) + Self::prune_old_code(now) + + Self::process_scheduled_upgrade_changes(now) + + Self::process_future_code_upgrades_at(now) } /// Called by the initializer to finalize the paras pallet. @@ -1355,16 +1375,13 @@ // NOTE both of those iterates over the list and the outgoing. We do not expect either // of these to be large. Thus should be fine.
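The offboarding cleanup that the NOTE above refers to boils down to the `Vec::retain` pattern shown in the mutations that follow; a standalone sketch with plain integers standing in for `ParaId` and block numbers:

```rust
// Drop every scheduled entry that belongs to a para that is going offboard.
fn main() {
    let outgoing = vec![2u32, 5];
    let mut future_upgrades: Vec<(u32, u32)> = vec![(1, 10), (2, 12), (5, 14), (7, 20)];
    future_upgrades.retain(|(para, _)| !outgoing.contains(para));
    assert_eq!(future_upgrades, vec![(1, 10), (7, 20)]);
}
```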
UpcomingUpgrades::::mutate(|upcoming_upgrades| { - *upcoming_upgrades = mem::take(upcoming_upgrades) - .into_iter() - .filter(|(para, _)| !outgoing.contains(para)) - .collect(); + upcoming_upgrades.retain(|(para, _)| !outgoing.contains(para)); }); UpgradeCooldowns::::mutate(|upgrade_cooldowns| { - *upgrade_cooldowns = mem::take(upgrade_cooldowns) - .into_iter() - .filter(|(para, _)| !outgoing.contains(para)) - .collect(); + upgrade_cooldowns.retain(|(para, _)| !outgoing.contains(para)); + }); + FutureCodeUpgradesAt::::mutate(|future_upgrades| { + future_upgrades.retain(|(para, _)| !outgoing.contains(para)); }); } @@ -1460,6 +1477,37 @@ impl Pallet { T::DbWeight::get().reads_writes(1 + pruning_tasks_done, 2 * pruning_tasks_done) } + /// Process the future code upgrades that should be applied directly. + /// + /// Upgrades that should not be applied directly are processed in + /// [`Self::process_scheduled_upgrade_changes`]. + fn process_future_code_upgrades_at(now: BlockNumberFor) -> Weight { + // account weight for `FutureCodeUpgradesAt::mutate`. + let mut weight = T::DbWeight::get().reads_writes(1, 1); + FutureCodeUpgradesAt::::mutate( + |upcoming_upgrades: &mut Vec<(ParaId, BlockNumberFor)>| { + let num = upcoming_upgrades.iter().take_while(|&(_, at)| at <= &now).count(); + for (id, expected_at) in upcoming_upgrades.drain(..num) { + weight += T::DbWeight::get().reads_writes(1, 1); + + // Both should always be `Some` in this case, since a code upgrade is scheduled. + let new_code_hash = if let Some(new_code_hash) = FutureCodeHash::::take(&id) + { + new_code_hash + } else { + log::error!(target: LOG_TARGET, "Missing future code hash for {:?}", &id); + continue + }; + + weight += Self::set_current_code(id, new_code_hash, expected_at); + } + num + }, + ); + + weight + } + /// Process the timers related to upgrades. Specifically, the upgrade go ahead signals toggle /// and the upgrade cooldown restrictions. However, this function does not actually unset /// the upgrade restriction, that will happen in the `initializer_finalize` function. However, @@ -1580,14 +1628,14 @@ PvfCheckCause::Onboarding(id) => { weight += Self::proceed_with_onboarding(*id, sessions_observed); }, - PvfCheckCause::Upgrade { id, included_at, set_go_ahead } => { + PvfCheckCause::Upgrade { id, included_at, upgrade_strategy } => { weight += Self::proceed_with_upgrade( *id, code_hash, now, *included_at, cfg, - *set_go_ahead, + *upgrade_strategy, ); }, } @@ -1621,38 +1669,50 @@ now: BlockNumberFor, relay_parent_number: BlockNumberFor, cfg: &configuration::HostConfiguration>, - set_go_ahead: SetGoAhead, + upgrade_strategy: UpgradeStrategy, ) -> Weight { let mut weight = Weight::zero(); - // Compute the relay-chain block number starting at which the code upgrade is ready to be - // applied. + // Compute the relay-chain block number starting at which the code upgrade is ready to + // be applied. // - // The first parablock that has a relay-parent higher or at the same height of `expected_at` - // will trigger the code upgrade. The parablock that comes after that will be validated - // against the new validation code. + // The first parablock that has a relay-parent higher or at the same height of + // `expected_at` will trigger the code upgrade. The parablock that comes after that will + // be validated against the new validation code.
// - // Here we are trying to choose the block number that will have `validation_upgrade_delay` - // blocks from the relay-parent of inclusion of the the block that scheduled code upgrade - // but no less than `minimum_validation_upgrade_delay`. We want this delay out of caution - // so that when the last vote for pre-checking comes the parachain will have some time until - // the upgrade finally takes place. + // Here we are trying to choose the block number that will have + // `validation_upgrade_delay` blocks from the relay-parent of inclusion of the block + // that scheduled the code upgrade, but no less than `minimum_validation_upgrade_delay`. We + // want this delay out of caution so that when the last vote for pre-checking comes the + // parachain will have some time until the upgrade finally takes place. let expected_at = cmp::max( relay_parent_number + cfg.validation_upgrade_delay, now + cfg.minimum_validation_upgrade_delay, ); - weight += T::DbWeight::get().reads_writes(1, 4); - FutureCodeUpgrades::::insert(&id, expected_at); + match upgrade_strategy { + UpgradeStrategy::ApplyAtExpectedBlock => { + FutureCodeUpgradesAt::::mutate(|future_upgrades| { + let insert_idx = future_upgrades + .binary_search_by_key(&expected_at, |&(_, b)| b) + .unwrap_or_else(|idx| idx); + future_upgrades.insert(insert_idx, (id, expected_at)); + }); - // Only set an upcoming upgrade if `GoAhead` signal should be set for the respective para. - if set_go_ahead == SetGoAhead::Yes { - UpcomingUpgrades::::mutate(|upcoming_upgrades| { - let insert_idx = upcoming_upgrades - .binary_search_by_key(&expected_at, |&(_, b)| b) - .unwrap_or_else(|idx| idx); - upcoming_upgrades.insert(insert_idx, (id, expected_at)); - }); + weight += T::DbWeight::get().reads_writes(0, 2); + }, + UpgradeStrategy::SetGoAheadSignal => { + FutureCodeUpgrades::::insert(&id, expected_at); + + UpcomingUpgrades::::mutate(|upcoming_upgrades| { + let insert_idx = upcoming_upgrades + .binary_search_by_key(&expected_at, |&(_, b)| b) + .unwrap_or_else(|idx| idx); + upcoming_upgrades.insert(insert_idx, (id, expected_at)); + }); + + weight += T::DbWeight::get().reads_writes(1, 3); + }, } let expected_at = expected_at.saturated_into(); @@ -1892,7 +1952,7 @@ new_code: ValidationCode, inclusion_block_number: BlockNumberFor, cfg: &configuration::HostConfiguration>, - set_go_ahead: SetGoAhead, + upgrade_strategy: UpgradeStrategy, ) -> Weight { let mut weight = T::DbWeight::get().reads(1); @@ -1949,7 +2009,7 @@ }); weight += Self::kick_off_pvf_check( - PvfCheckCause::Upgrade { id, included_at: inclusion_block_number, set_go_ahead }, + PvfCheckCause::Upgrade { id, included_at: inclusion_block_number, upgrade_strategy }, code_hash, new_code, cfg, @@ -2061,24 +2121,10 @@ impl Pallet { log::error!(target: LOG_TARGET, "Missing future code hash for {:?}", &id); return T::DbWeight::get().reads_writes(3, 1 + 3) }; - let maybe_prior_code_hash = CurrentCodeHash::::get(&id); - CurrentCodeHash::::insert(&id, &new_code_hash); - let log = ConsensusLog::ParaUpgradeCode(id, new_code_hash); - >::deposit_log(log.into()); + let weight = Self::set_current_code(id, new_code_hash, expected_at);
// add 1 to writes due to heads update. - weight + T::DbWeight::get().reads_writes(3, 1 + 3) + weight + T::DbWeight::get().reads_writes(3, 3) } else { T::DbWeight::get().reads_writes(1, 1 + 0) } @@ -2094,6 +2140,34 @@ impl Pallet { weight.saturating_add(T::OnNewHead::on_new_head(id, &new_head)) } + /// Set the current code for the given parachain. + // `at` for para-triggered replacement is the block number of the relay-chain + // block in whose context the parablock was executed + // (i.e. number of `relay_parent` in the receipt) + pub(crate) fn set_current_code( + id: ParaId, + new_code_hash: ValidationCodeHash, + at: BlockNumberFor, + ) -> Weight { + let maybe_prior_code_hash = CurrentCodeHash::::get(&id); + CurrentCodeHash::::insert(&id, &new_code_hash); + + let log = ConsensusLog::ParaUpgradeCode(id, new_code_hash); + >::deposit_log(log.into()); + + // `now` is only used for registering pruning as part of `fn note_past_code` + let now = >::block_number(); + + let weight = if let Some(prior_code_hash) = maybe_prior_code_hash { + Self::note_past_code(id, at, now, prior_code_hash) + } else { + log::error!(target: LOG_TARGET, "Missing prior code hash for para {:?}", &id); + Weight::zero() + }; + + weight + T::DbWeight::get().writes(1) + } + /// Returns the list of PVFs (aka validation code) that require casting a vote by a validator in /// the active validator set. pub(crate) fn pvfs_require_precheck() -> Vec { diff --git a/polkadot/runtime/parachains/src/paras/tests.rs b/polkadot/runtime/parachains/src/paras/tests.rs index 39abd2367b7..ad75166271e 100644 --- a/polkadot/runtime/parachains/src/paras/tests.rs +++ b/polkadot/runtime/parachains/src/paras/tests.rs @@ -451,7 +451,7 @@ fn code_upgrade_applied_after_delay() { new_code.clone(), 1, &Configuration::config(), - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); // Include votes for super-majority. submit_super_majority_pvf_votes(&new_code, EXPECTED_SESSION, true); @@ -521,7 +521,7 @@ fn code_upgrade_applied_after_delay() { } #[test] -fn code_upgrade_applied_without_setting_go_ahead_signal() { +fn upgrade_strategy_apply_at_expected_block_works() { let code_retention_period = 10; let validation_upgrade_delay = 5; let validation_upgrade_cooldown = 10; @@ -560,77 +560,42 @@ fn code_upgrade_applied_without_setting_go_ahead_signal() { run_to_block(2, Some(vec![1])); assert_eq!(Paras::current_code(¶_id), Some(original_code.clone())); - let (expected_at, next_possible_upgrade_at) = { - // this parablock is in the context of block 1. - let expected_at = 1 + validation_upgrade_delay; - let next_possible_upgrade_at = 1 + validation_upgrade_cooldown; - // `set_go_ahead` parameter set to `false` which prevents signaling the parachain - // with the `GoAhead` signal. - Paras::schedule_code_upgrade( - para_id, - new_code.clone(), - 1, - &Configuration::config(), - SetGoAhead::No, - ); - // Include votes for super-majority. 
- submit_super_majority_pvf_votes(&new_code, EXPECTED_SESSION, true);
-
- Paras::note_new_head(para_id, Default::default(), 1);
-
- assert!(Paras::past_code_meta(&para_id).most_recent_change().is_none());
- assert_eq!(FutureCodeUpgrades::<Test>::get(&para_id), Some(expected_at));
- assert_eq!(FutureCodeHash::<Test>::get(&para_id), Some(new_code.hash()));
- assert_eq!(UpcomingUpgrades::<Test>::get(), vec![]);
- assert_eq!(UpgradeCooldowns::<Test>::get(), vec![(para_id, next_possible_upgrade_at)]);
- assert_eq!(Paras::current_code(&para_id), Some(original_code.clone()));
- check_code_is_stored(&original_code);
- check_code_is_stored(&new_code);
-
- (expected_at, next_possible_upgrade_at)
- };
+ // this parablock is in the context of block 1.
+ let expected_at = 1 + validation_upgrade_delay;
+ let next_possible_upgrade_at = 1 + validation_upgrade_cooldown;
+ // The `UpgradeStrategy::ApplyAtExpectedBlock` strategy is used, which prevents
+ // signaling the parachain with the `GoAhead` signal.
+ Paras::schedule_code_upgrade(
+ para_id,
+ new_code.clone(),
+ 1,
+ &Configuration::config(),
+ UpgradeStrategy::ApplyAtExpectedBlock,
+ );
+ // Include votes for super-majority.
+ submit_super_majority_pvf_votes(&new_code, EXPECTED_SESSION, true);
+ assert!(FutureCodeUpgradesAt::<Test>::get().iter().any(|(id, _)| *id == para_id));
+ // Going to the expected block triggers the upgrade directly.
run_to_block(expected_at, None);
- // the candidate is in the context of the parent of `expected_at`,
- // thus does not trigger the code upgrade. However, now the `UpgradeGoAheadSignal`
- // should not be set.
- {
- Paras::note_new_head(para_id, Default::default(), expected_at - 1);
-
- assert!(Paras::past_code_meta(&para_id).most_recent_change().is_none());
- assert_eq!(FutureCodeUpgrades::<Test>::get(&para_id), Some(expected_at));
- assert_eq!(FutureCodeHash::<Test>::get(&para_id), Some(new_code.hash()));
- assert!(UpgradeGoAheadSignal::<Test>::get(&para_id).is_none());
- assert_eq!(Paras::current_code(&para_id), Some(original_code.clone()));
- check_code_is_stored(&original_code);
- check_code_is_stored(&new_code);
- }
-
- run_to_block(expected_at + 1, None);
-
- // the candidate is in the context of `expected_at`, and triggers
- // the upgrade.
- {
- Paras::note_new_head(para_id, Default::default(), expected_at);
+ // Reporting a head doesn't change anything.
+ Paras::note_new_head(para_id, Default::default(), expected_at - 1);
- assert_eq!(Paras::past_code_meta(&para_id).most_recent_change(), Some(expected_at));
- assert_eq!(
- PastCodeHash::<Test>::get(&(para_id, expected_at)),
- Some(original_code.hash()),
- );
- assert!(FutureCodeUpgrades::<Test>::get(&para_id).is_none());
- assert!(FutureCodeHash::<Test>::get(&para_id).is_none());
- assert!(UpgradeGoAheadSignal::<Test>::get(&para_id).is_none());
- assert_eq!(Paras::current_code(&para_id), Some(new_code.clone()));
- assert_eq!(
- UpgradeRestrictionSignal::<Test>::get(&para_id),
- Some(UpgradeRestriction::Present),
- );
- assert_eq!(UpgradeCooldowns::<Test>::get(), vec![(para_id, next_possible_upgrade_at)]);
- check_code_is_stored(&original_code);
- check_code_is_stored(&new_code);
- }
+ assert_eq!(Paras::past_code_meta(&para_id).most_recent_change(), Some(expected_at));
+ assert_eq!(PastCodeHash::<Test>::get(&(para_id, expected_at)), Some(original_code.hash()));
+ assert!(FutureCodeUpgrades::<Test>::get(&para_id).is_none());
+ assert!(FutureCodeUpgradesAt::<Test>::get().iter().all(|(id, _)| *id != para_id));
+ assert!(FutureCodeHash::<Test>::get(&para_id).is_none());
+ assert!(UpgradeGoAheadSignal::<Test>::get(&para_id).is_none());
+ assert_eq!(Paras::current_code(&para_id), Some(new_code.clone()));
+ assert_eq!(
+ UpgradeRestrictionSignal::<Test>::get(&para_id),
+ Some(UpgradeRestriction::Present),
+ );
+ assert_eq!(UpgradeCooldowns::<Test>::get(), vec![(para_id, next_possible_upgrade_at)]);
+ check_code_is_stored(&original_code);
+ check_code_is_stored(&new_code);
run_to_block(next_possible_upgrade_at + 1, None);
@@ -688,7 +653,7 @@ fn code_upgrade_applied_after_delay_even_when_late() {
new_code.clone(),
1,
&Configuration::config(),
- SetGoAhead::Yes,
+ UpgradeStrategy::SetGoAheadSignal,
);
// Include votes for super-majority.
submit_super_majority_pvf_votes(&new_code, EXPECTED_SESSION, true);
@@ -772,7 +737,7 @@ fn submit_code_change_when_not_allowed_is_err() {
new_code.clone(),
1,
&Configuration::config(),
- SetGoAhead::Yes,
+ UpgradeStrategy::SetGoAheadSignal,
);
// Include votes for super-majority.
submit_super_majority_pvf_votes(&new_code, EXPECTED_SESSION, true);
@@ -790,7 +755,7 @@ fn submit_code_change_when_not_allowed_is_err() {
newer_code.clone(),
2,
&Configuration::config(),
- SetGoAhead::Yes,
+ UpgradeStrategy::SetGoAheadSignal,
);
assert_eq!(
FutureCodeUpgrades::<Test>::get(&para_id),
@@ -854,7 +819,7 @@ fn upgrade_restriction_elapsed_doesnt_mean_can_upgrade() {
new_code.clone(),
0,
&Configuration::config(),
- SetGoAhead::Yes,
+ UpgradeStrategy::SetGoAheadSignal,
);
// Include votes for super-majority.
submit_super_majority_pvf_votes(&new_code, EXPECTED_SESSION, true);
@@ -879,7 +844,7 @@ fn upgrade_restriction_elapsed_doesnt_mean_can_upgrade() {
newer_code.clone(),
30,
&Configuration::config(),
- SetGoAhead::Yes,
+ UpgradeStrategy::SetGoAheadSignal,
);
assert_eq!(FutureCodeUpgrades::<Test>::get(&para_id), Some(0 + validation_upgrade_delay));
});
@@ -940,7 +905,7 @@ fn full_parachain_cleanup_storage() {
new_code.clone(),
1,
&Configuration::config(),
- SetGoAhead::Yes,
+ UpgradeStrategy::SetGoAheadSignal,
);
// Include votes for super-majority.
submit_super_majority_pvf_votes(&new_code, EXPECTED_SESSION, true);
@@ -1036,7 +1001,7 @@ fn cannot_offboard_ongoing_pvf_check() {
new_code.clone(),
RELAY_PARENT,
&Configuration::config(),
- SetGoAhead::Yes,
+ UpgradeStrategy::SetGoAheadSignal,
);
assert!(!Paras::pvfs_require_precheck().is_empty());
@@ -1194,7 +1159,7 @@ fn code_hash_at_returns_up_to_end_of_code_retention_period() {
new_code.clone(),
0,
&Configuration::config(),
- SetGoAhead::Yes,
+ UpgradeStrategy::SetGoAheadSignal,
);
// Include votes for super-majority.
submit_super_majority_pvf_votes(&new_code, EXPECTED_SESSION, true);
@@ -1303,7 +1268,7 @@ fn pvf_check_coalescing_onboarding_and_upgrade() {
validation_code.clone(),
RELAY_PARENT,
&Configuration::config(),
- SetGoAhead::Yes,
+ UpgradeStrategy::SetGoAheadSignal,
);
assert!(!Paras::pvfs_require_precheck().is_empty());
@@ -1413,7 +1378,7 @@ fn pvf_check_upgrade_reject() {
new_code.clone(),
RELAY_PARENT,
&Configuration::config(),
- SetGoAhead::Yes,
+ UpgradeStrategy::SetGoAheadSignal,
);
check_code_is_stored(&new_code);
@@ -1599,7 +1564,7 @@ fn include_pvf_check_statement_refunds_weight() {
new_code.clone(),
RELAY_PARENT,
&Configuration::config(),
- SetGoAhead::Yes,
+ UpgradeStrategy::SetGoAheadSignal,
);
let mut stmts = IntoIterator::into_iter([0, 1, 2, 3])
@@ -1700,7 +1665,7 @@ fn poke_unused_validation_code_doesnt_remove_code_with_users() {
validation_code.clone(),
1,
&Configuration::config(),
- SetGoAhead::Yes,
+ UpgradeStrategy::SetGoAheadSignal,
);
Paras::note_new_head(para_id, HeadData::default(), 1);
@@ -1771,7 +1736,7 @@ fn add_trusted_validation_code_insta_approval() {
validation_code.clone(),
1,
&Configuration::config(),
- SetGoAhead::Yes,
+ UpgradeStrategy::SetGoAheadSignal,
);
Paras::note_new_head(para_id, HeadData::default(), 1);
@@ -1813,7 +1778,7 @@ fn add_trusted_validation_code_enacts_existing_pvf_vote() {
validation_code.clone(),
1,
&Configuration::config(),
- SetGoAhead::Yes,
+ UpgradeStrategy::SetGoAheadSignal,
);
Paras::note_new_head(para_id, HeadData::default(), 1);
diff --git a/prdoc/pr_3341.prdoc b/prdoc/pr_3341.prdoc
new file mode 100644
index 00000000000..de714fa5a1e
--- /dev/null
+++ b/prdoc/pr_3341.prdoc
@@ -0,0 +1,18 @@
+title: "Fix `schedule_code_upgrade` when called by the owner/root"
+
+doc:
+  - audience: Runtime User
+    description: |
+      Fixes `schedule_code_upgrade` when being used by the owner/root. The call is used for
+      manually upgrading the validation code of a parachain on the relay chain. It was failing
+      before because the relay chain waited for the parachain to make progress. However, this
+      call is mostly used when a parachain is bricked, which means that it is no longer
+      able to build any blocks. The fix is to schedule the validation code upgrade and then
+      to enact it at the scheduled block. The enactment now happens without requiring the
+      parachain to make any progress.
+
+crates:
+  - name: polkadot-runtime-common
+    bump: minor
+  - name: polkadot-runtime-parachains
+    bump: major
--
GitLab

From db1af43c39f052fbf85a330e74c02a75f65f59a0 Mon Sep 17 00:00:00 2001
From: Javier Viola <363911+pepoviola@users.noreply.github.com>
Date: Tue, 2 Apr 2024 13:22:21 +0200
Subject: [PATCH 079/128] chore(zombienet): bump version (#3933)

This version includes:
- Internal metrics of zombienet (used to benchmark with v2).
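As a purely illustrative, hedged sketch of the push mechanism (this is not
zombienet's actual code — zombienet is not written in Rust, and the job,
metric, and function names below are invented), a benchmark metric could reach
the pushgateway configured via the `PUSHGATEWAY_URL` variable added below
roughly like this, using the Rust `prometheus` crate with its `push` feature:

```rust
// Hypothetical example only: push one gauge to a Prometheus pushgateway.
// Assumes the `prometheus` crate with `features = ["push"]`.
use prometheus::{labels, push_metrics, register_gauge};

fn push_zombienet_metric() -> prometheus::Result<()> {
    // Invented metric name: time taken to spawn the test network.
    let spawn_seconds =
        register_gauge!("zombie_network_spawn_seconds", "Time to spawn the network")?;
    spawn_seconds.set(42.0);

    push_metrics(
        // Job name, mirroring the `job/zombie-metrics` segment of PUSHGATEWAY_URL.
        "zombie-metrics",
        labels! {"instance".to_owned() => "ci".to_owned(),},
        // Base pushgateway URL; the crate appends the `/metrics/job/<job>` path itself.
        "http://zombienet-prometheus-pushgateway.managed-monitoring:9091",
        prometheus::gather(),
        None, // no basic auth in CI
    )
}
```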
--- .gitlab/pipeline/zombienet.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitlab/pipeline/zombienet.yml b/.gitlab/pipeline/zombienet.yml index 82341eb709f..52948e1eb71 100644 --- a/.gitlab/pipeline/zombienet.yml +++ b/.gitlab/pipeline/zombienet.yml @@ -1,7 +1,8 @@ .zombienet-refs: extends: .build-refs variables: - ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.98" + ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.99" + PUSHGATEWAY_URL: "http://zombienet-prometheus-pushgateway.managed-monitoring:9091/metrics/job/zombie-metrics" include: # substrate tests -- GitLab From d5617cf3cd69fb3b7de338c46d8a8778452ce769 Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Tue, 2 Apr 2024 15:25:56 +0300 Subject: [PATCH 080/128] Update bridges subtree (#3938) Pulling the latest changes from `parity-bridges-common` --- Cargo.lock | 8 ++++---- bridges/README.md | 6 +++--- bridges/bin/runtime-common/Cargo.toml | 4 ++-- bridges/chains/chain-asset-hub-rococo/Cargo.toml | 5 +++-- bridges/chains/chain-asset-hub-westend/Cargo.toml | 5 +++-- bridges/chains/chain-bridge-hub-cumulus/Cargo.toml | 1 + bridges/chains/chain-bridge-hub-kusama/Cargo.toml | 1 + bridges/chains/chain-bridge-hub-polkadot/Cargo.toml | 1 + bridges/chains/chain-bridge-hub-rococo/Cargo.toml | 1 + bridges/chains/chain-bridge-hub-westend/Cargo.toml | 1 + bridges/chains/chain-kusama/Cargo.toml | 1 + bridges/chains/chain-polkadot-bulletin/Cargo.toml | 5 +++-- bridges/chains/chain-polkadot/Cargo.toml | 1 + bridges/chains/chain-rococo/Cargo.toml | 1 + bridges/chains/chain-westend/Cargo.toml | 1 + bridges/modules/grandpa/Cargo.toml | 5 +++-- bridges/modules/messages/Cargo.toml | 5 +++-- bridges/modules/parachains/Cargo.toml | 5 +++-- bridges/modules/relayers/Cargo.toml | 5 +++-- bridges/modules/xcm-bridge-hub-router/Cargo.toml | 5 +++-- bridges/modules/xcm-bridge-hub/Cargo.toml | 5 +++-- bridges/primitives/header-chain/Cargo.toml | 5 +++-- bridges/primitives/messages/Cargo.toml | 5 +++-- bridges/primitives/parachains/Cargo.toml | 5 +++-- bridges/primitives/polkadot-core/Cargo.toml | 5 +++-- bridges/primitives/relayers/Cargo.toml | 5 +++-- bridges/primitives/runtime/Cargo.toml | 5 +++-- bridges/primitives/test-utils/Cargo.toml | 3 ++- bridges/primitives/xcm-bridge-hub-router/Cargo.toml | 5 +++-- bridges/primitives/xcm-bridge-hub/Cargo.toml | 1 + 30 files changed, 69 insertions(+), 42 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 88fde5ed154..9a8eff4691e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17000,9 +17000,9 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.11.0" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ef2175c2907e7c8bc0a9c3f86aeb5ec1f3b275300ad58a44d0c3ae379a5e52e" +checksum = "788745a868b0e751750388f4e6546eb921ef714a4317fa6954f7cde114eb2eb7" dependencies = [ "bitvec", "cfg-if", @@ -17014,9 +17014,9 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.11.0" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634d9b8eb8fd61c5cdd3390d9b2132300a7e7618955b98b8416f118c1b4e144f" +checksum = "7dc2f4e8bc344b9fc3d5f74f72c2e55bfc38d28dc2ebc69c194a3df424e4d9ac" dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", diff --git a/bridges/README.md b/bridges/README.md index a2ce213d254..8bfa39841f5 100644 --- a/bridges/README.md +++ b/bridges/README.md @@ -38,10 +38,10 @@ cargo test --all ``` Also you can build the repo with [Parity CI Docker 
-image](https://github.com/paritytech/scripts/tree/master/dockerfiles/bridges-ci): +image](https://github.com/paritytech/scripts/tree/master/dockerfiles/ci-unified): ```bash -docker pull paritytech/bridges-ci:production +docker pull paritytech/ci-unified:latest mkdir ~/cache chown 1000:1000 ~/cache #processes in the container runs as "nonroot" user with UID 1000 docker run --rm -it -w /shellhere/parity-bridges-common \ @@ -49,7 +49,7 @@ docker run --rm -it -w /shellhere/parity-bridges-common \ -v "$(pwd)":/shellhere/parity-bridges-common \ -e CARGO_HOME=/cache/cargo/ \ -e SCCACHE_DIR=/cache/sccache/ \ - -e CARGO_TARGET_DIR=/cache/target/ paritytech/bridges-ci:production cargo build --all + -e CARGO_TARGET_DIR=/cache/target/ paritytech/ci-unified:latest cargo build --all #artifacts can be found in ~/cache/target ``` diff --git a/bridges/bin/runtime-common/Cargo.toml b/bridges/bin/runtime-common/Cargo.toml index f00ba1c9734..67b91a16a30 100644 --- a/bridges/bin/runtime-common/Cargo.toml +++ b/bridges/bin/runtime-common/Cargo.toml @@ -11,10 +11,10 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } hash-db = { version = "0.16.0", default-features = false } log = { workspace = true } -scale-info = { version = "2.11.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } static_assertions = { version = "1.1", optional = true } # Bridge dependencies diff --git a/bridges/chains/chain-asset-hub-rococo/Cargo.toml b/bridges/chains/chain-asset-hub-rococo/Cargo.toml index 55dc384badd..9a6419a5b40 100644 --- a/bridges/chains/chain-asset-hub-rococo/Cargo.toml +++ b/bridges/chains/chain-asset-hub-rococo/Cargo.toml @@ -5,13 +5,14 @@ version = "0.4.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } -scale-info = { version = "2.11.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate Dependencies frame-support = { path = "../../../substrate/frame/support", default-features = false } diff --git a/bridges/chains/chain-asset-hub-westend/Cargo.toml b/bridges/chains/chain-asset-hub-westend/Cargo.toml index 1379b099a2a..1c08ee28e41 100644 --- a/bridges/chains/chain-asset-hub-westend/Cargo.toml +++ b/bridges/chains/chain-asset-hub-westend/Cargo.toml @@ -5,13 +5,14 @@ version = "0.3.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } -scale-info = { version = "2.11.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate Dependencies frame-support = { path = 
"../../../substrate/frame/support", default-features = false } diff --git a/bridges/chains/chain-bridge-hub-cumulus/Cargo.toml b/bridges/chains/chain-bridge-hub-cumulus/Cargo.toml index 5e14cb052b7..4b900002a4d 100644 --- a/bridges/chains/chain-bridge-hub-cumulus/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-cumulus/Cargo.toml @@ -5,6 +5,7 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true diff --git a/bridges/chains/chain-bridge-hub-kusama/Cargo.toml b/bridges/chains/chain-bridge-hub-kusama/Cargo.toml index 77bc8e54a9d..ff6dd8849ab 100644 --- a/bridges/chains/chain-bridge-hub-kusama/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-kusama/Cargo.toml @@ -5,6 +5,7 @@ version = "0.6.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true diff --git a/bridges/chains/chain-bridge-hub-polkadot/Cargo.toml b/bridges/chains/chain-bridge-hub-polkadot/Cargo.toml index 5d7a3bbcc1d..da8b8a82fa7 100644 --- a/bridges/chains/chain-bridge-hub-polkadot/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-polkadot/Cargo.toml @@ -5,6 +5,7 @@ version = "0.6.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true diff --git a/bridges/chains/chain-bridge-hub-rococo/Cargo.toml b/bridges/chains/chain-bridge-hub-rococo/Cargo.toml index 3966ef72dcb..f7672df012f 100644 --- a/bridges/chains/chain-bridge-hub-rococo/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-rococo/Cargo.toml @@ -5,6 +5,7 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true diff --git a/bridges/chains/chain-bridge-hub-westend/Cargo.toml b/bridges/chains/chain-bridge-hub-westend/Cargo.toml index d35eac8b3fe..ec74c4b947d 100644 --- a/bridges/chains/chain-bridge-hub-westend/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-westend/Cargo.toml @@ -5,6 +5,7 @@ version = "0.3.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true diff --git a/bridges/chains/chain-kusama/Cargo.toml b/bridges/chains/chain-kusama/Cargo.toml index 4ff4cb46976..66061ff2793 100644 --- a/bridges/chains/chain-kusama/Cargo.toml +++ b/bridges/chains/chain-kusama/Cargo.toml @@ -5,6 +5,7 @@ version = "0.5.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true diff --git a/bridges/chains/chain-polkadot-bulletin/Cargo.toml b/bridges/chains/chain-polkadot-bulletin/Cargo.toml index 37e060d897c..2db16a00e92 100644 --- a/bridges/chains/chain-polkadot-bulletin/Cargo.toml +++ b/bridges/chains/chain-polkadot-bulletin/Cargo.toml @@ -5,13 +5,14 @@ version = "0.4.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", 
default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Bridge Dependencies diff --git a/bridges/chains/chain-polkadot/Cargo.toml b/bridges/chains/chain-polkadot/Cargo.toml index 0db6791f66e..c700935f308 100644 --- a/bridges/chains/chain-polkadot/Cargo.toml +++ b/bridges/chains/chain-polkadot/Cargo.toml @@ -5,6 +5,7 @@ version = "0.5.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true diff --git a/bridges/chains/chain-rococo/Cargo.toml b/bridges/chains/chain-rococo/Cargo.toml index 9c63f960ae4..5a5613bb376 100644 --- a/bridges/chains/chain-rococo/Cargo.toml +++ b/bridges/chains/chain-rococo/Cargo.toml @@ -5,6 +5,7 @@ version = "0.6.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true diff --git a/bridges/chains/chain-westend/Cargo.toml b/bridges/chains/chain-westend/Cargo.toml index f5de9b95c82..10b06d76507 100644 --- a/bridges/chains/chain-westend/Cargo.toml +++ b/bridges/chains/chain-westend/Cargo.toml @@ -5,6 +5,7 @@ version = "0.3.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true diff --git a/bridges/modules/grandpa/Cargo.toml b/bridges/modules/grandpa/Cargo.toml index 25c6c4e03d5..0db1827211a 100644 --- a/bridges/modules/grandpa/Cargo.toml +++ b/bridges/modules/grandpa/Cargo.toml @@ -5,6 +5,7 @@ description = "Module implementing GRANDPA on-chain light client used for bridgi authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true @@ -12,10 +13,10 @@ workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } finality-grandpa = { version = "0.16.2", default-features = false } log = { workspace = true } -scale-info = { version = "2.11.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Bridge Dependencies diff --git a/bridges/modules/messages/Cargo.toml b/bridges/modules/messages/Cargo.toml index 7d0e1b94959..df5b92db740 100644 --- a/bridges/modules/messages/Cargo.toml +++ b/bridges/modules/messages/Cargo.toml @@ -5,15 +5,16 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } num-traits = { version = "0.2", default-features = false } -scale-info = { version = "2.11.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Bridge dependencies diff --git a/bridges/modules/parachains/Cargo.toml b/bridges/modules/parachains/Cargo.toml index a9dd9beeb1f..35213be0674 100644 --- a/bridges/modules/parachains/Cargo.toml 
+++ b/bridges/modules/parachains/Cargo.toml @@ -5,14 +5,15 @@ description = "Module that allows bridged relay chains to exchange information o authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.11.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Bridge Dependencies diff --git a/bridges/modules/relayers/Cargo.toml b/bridges/modules/relayers/Cargo.toml index f3de72da771..e2b7aca9224 100644 --- a/bridges/modules/relayers/Cargo.toml +++ b/bridges/modules/relayers/Cargo.toml @@ -5,14 +5,15 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.11.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Bridge dependencies diff --git a/bridges/modules/xcm-bridge-hub-router/Cargo.toml b/bridges/modules/xcm-bridge-hub-router/Cargo.toml index 98477f2df18..06f2a339bed 100644 --- a/bridges/modules/xcm-bridge-hub-router/Cargo.toml +++ b/bridges/modules/xcm-bridge-hub-router/Cargo.toml @@ -5,14 +5,15 @@ version = "0.5.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.11.0", default-features = false, features = ["bit-vec", "derive", "serde"] } +scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive", "serde"] } # Bridge dependencies diff --git a/bridges/modules/xcm-bridge-hub/Cargo.toml b/bridges/modules/xcm-bridge-hub/Cargo.toml index 68ac32281f3..4483a379090 100644 --- a/bridges/modules/xcm-bridge-hub/Cargo.toml +++ b/bridges/modules/xcm-bridge-hub/Cargo.toml @@ -5,14 +5,15 @@ version = "0.2.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.11.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Bridge Dependencies bp-messages = { path = "../../primitives/messages", default-features = false } diff --git a/bridges/primitives/header-chain/Cargo.toml b/bridges/primitives/header-chain/Cargo.toml index d96a02efba8..f7a61a9ff32 100644 --- 
a/bridges/primitives/header-chain/Cargo.toml +++ b/bridges/primitives/header-chain/Cargo.toml @@ -5,14 +5,15 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } finality-grandpa = { version = "0.16.2", default-features = false } -scale-info = { version = "2.11.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], workspace = true } # Bridge dependencies diff --git a/bridges/primitives/messages/Cargo.toml b/bridges/primitives/messages/Cargo.toml index 9d742e3eded..d41acfb9d32 100644 --- a/bridges/primitives/messages/Cargo.toml +++ b/bridges/primitives/messages/Cargo.toml @@ -5,13 +5,14 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["bit-vec", "derive"] } -scale-info = { version = "2.11.0", default-features = false, features = ["bit-vec", "derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive"] } serde = { features = ["alloc", "derive"], workspace = true } # Bridge dependencies diff --git a/bridges/primitives/parachains/Cargo.toml b/bridges/primitives/parachains/Cargo.toml index 3846c563575..2e7000b86a5 100644 --- a/bridges/primitives/parachains/Cargo.toml +++ b/bridges/primitives/parachains/Cargo.toml @@ -5,14 +5,15 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2" -scale-info = { version = "2.11.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Bridge dependencies diff --git a/bridges/primitives/polkadot-core/Cargo.toml b/bridges/primitives/polkadot-core/Cargo.toml index 5ab502569e4..53b1e574cb1 100644 --- a/bridges/primitives/polkadot-core/Cargo.toml +++ b/bridges/primitives/polkadot-core/Cargo.toml @@ -5,14 +5,15 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } parity-util-mem = { version = "0.12.0", optional = true } -scale-info = { version = "2.11.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = 
["derive"] } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } # Bridge Dependencies diff --git a/bridges/primitives/relayers/Cargo.toml b/bridges/primitives/relayers/Cargo.toml index 71d0fbf2ec3..1be7f1dc6eb 100644 --- a/bridges/primitives/relayers/Cargo.toml +++ b/bridges/primitives/relayers/Cargo.toml @@ -5,13 +5,14 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["bit-vec", "derive"] } -scale-info = { version = "2.11.0", default-features = false, features = ["bit-vec", "derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive"] } # Bridge Dependencies diff --git a/bridges/primitives/runtime/Cargo.toml b/bridges/primitives/runtime/Cargo.toml index 2d454d264a1..cca9c21a608 100644 --- a/bridges/primitives/runtime/Cargo.toml +++ b/bridges/primitives/runtime/Cargo.toml @@ -5,17 +5,18 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } hash-db = { version = "0.16.0", default-features = false } impl-trait-for-tuples = "0.2.2" log = { workspace = true } num-traits = { version = "0.2", default-features = false } -scale-info = { version = "2.11.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], workspace = true } # Substrate Dependencies diff --git a/bridges/primitives/test-utils/Cargo.toml b/bridges/primitives/test-utils/Cargo.toml index d379e950b86..d314c38683c 100644 --- a/bridges/primitives/test-utils/Cargo.toml +++ b/bridges/primitives/test-utils/Cargo.toml @@ -5,6 +5,7 @@ description = "Utilities for testing substrate-based runtime bridge code" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true @@ -14,7 +15,7 @@ bp-header-chain = { path = "../header-chain", default-features = false } bp-parachains = { path = "../parachains", default-features = false } bp-polkadot-core = { path = "../polkadot-core", default-features = false } bp-runtime = { path = "../runtime", default-features = false } -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } ed25519-dalek = { version = "2.1", default-features = false } finality-grandpa = { version = "0.16.2", default-features = false } sp-application-crypto = { path = "../../../substrate/primitives/application-crypto", default-features = false } diff --git a/bridges/primitives/xcm-bridge-hub-router/Cargo.toml b/bridges/primitives/xcm-bridge-hub-router/Cargo.toml index 734930f18c4..94eece16d57 100644 --- a/bridges/primitives/xcm-bridge-hub-router/Cargo.toml +++ b/bridges/primitives/xcm-bridge-hub-router/Cargo.toml @@ -5,13 +5,14 
@@ version = "0.6.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["bit-vec", "derive"] } -scale-info = { version = "2.11.0", default-features = false, features = ["bit-vec", "derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive"] } # Substrate Dependencies sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } diff --git a/bridges/primitives/xcm-bridge-hub/Cargo.toml b/bridges/primitives/xcm-bridge-hub/Cargo.toml index ad49ec1e831..27881bc99d1 100644 --- a/bridges/primitives/xcm-bridge-hub/Cargo.toml +++ b/bridges/primitives/xcm-bridge-hub/Cargo.toml @@ -5,6 +5,7 @@ version = "0.2.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true -- GitLab From 5eff3f94beb768a7ba56d9091f6137fa6dff6bb1 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Tue, 2 Apr 2024 15:28:48 +0300 Subject: [PATCH 081/128] beefy: error logs for validators with dummy keys (#3939) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This outputs: ``` 2024-04-02 14:36:02.135 ERROR tokio-runtime-worker beefy: 🥩 for session starting at block 21990151 no BEEFY authority key found in store, you must generate valid session keys (https://wiki.polkadot.network/docs/maintain-guides-how-to-validate-polkadot#generating-the-session-keys) ``` error log entry, once every session, for nodes running with `Role::Authority` that have no public BEEFY key in their keystore --------- Co-authored-by: Bastian Köcher --- polkadot/node/service/src/lib.rs | 25 +++--- substrate/bin/node/cli/src/service.rs | 1 + substrate/client/consensus/beefy/src/lib.rs | 18 ++++- substrate/client/consensus/beefy/src/tests.rs | 2 + .../client/consensus/beefy/src/worker.rs | 77 ++++--------------- 5 files changed, 50 insertions(+), 73 deletions(-) diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index 4f4ede53705..61076477f8e 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -1233,6 +1233,7 @@ pub fn new_full( prometheus_registry: prometheus_registry.clone(), links: beefy_links, on_demand_justifications_handler: beefy_on_demand_justifications_handler, + is_authority: role.is_authority(), }; let gadget = beefy::start_beefy_gadget::<_, _, _, _, _, _, _>(beefy_params); @@ -1242,18 +1243,18 @@ pub fn new_full( task_manager .spawn_essential_handle() .spawn_blocking("beefy-gadget", None, gadget); - // When offchain indexing is enabled, MMR gadget should also run. - if is_offchain_indexing_enabled { - task_manager.spawn_essential_handle().spawn_blocking( - "mmr-gadget", - None, - MmrGadget::start( - client.clone(), - backend.clone(), - sp_mmr_primitives::INDEXING_PREFIX.to_vec(), - ), - ); - } + } + // When offchain indexing is enabled, MMR gadget should also run. 
+ if is_offchain_indexing_enabled { + task_manager.spawn_essential_handle().spawn_blocking( + "mmr-gadget", + None, + MmrGadget::start( + client.clone(), + backend.clone(), + sp_mmr_primitives::INDEXING_PREFIX.to_vec(), + ), + ); } let config = grandpa::Config { diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs index dddb261a71d..d6e2a29d30b 100644 --- a/substrate/bin/node/cli/src/service.rs +++ b/substrate/bin/node/cli/src/service.rs @@ -652,6 +652,7 @@ pub fn new_full_base( prometheus_registry: prometheus_registry.clone(), links: beefy_links, on_demand_justifications_handler: beefy_on_demand_justifications_handler, + is_authority: role.is_authority(), }; let beefy_gadget = beefy::start_beefy_gadget::<_, _, _, _, _, _, _>(beefy_params); diff --git a/substrate/client/consensus/beefy/src/lib.rs b/substrate/client/consensus/beefy/src/lib.rs index 323af1bc830..714a0fb7c88 100644 --- a/substrate/client/consensus/beefy/src/lib.rs +++ b/substrate/client/consensus/beefy/src/lib.rs @@ -222,6 +222,8 @@ pub struct BeefyParams { pub links: BeefyVoterLinks, /// Handler for incoming BEEFY justifications requests from a remote peer. pub on_demand_justifications_handler: BeefyJustifsRequestHandler, + /// Whether running under "Authority" role. + pub is_authority: bool, } /// Helper object holding BEEFY worker communication/gossip components. /// @@ -270,6 +272,7 @@ where min_block_delta: u32, gossip_validator: Arc>, finality_notifications: &mut Fuse>, + is_authority: bool, ) -> Result { // Wait for BEEFY pallet to be active before starting voter. let (beefy_genesis, best_grandpa) = @@ -283,6 +286,7 @@ where runtime.clone(), &key_store, &metrics, + is_authority, ) .await?; // Update the gossip validator with the right starting round and set id. @@ -301,6 +305,7 @@ where comms: BeefyComms, links: BeefyVoterLinks, pending_justifications: BTreeMap, BeefyVersionedFinalityProof>, + is_authority: bool, ) -> BeefyWorker { BeefyWorker { backend: self.backend, @@ -313,6 +318,7 @@ where comms, links, pending_justifications, + is_authority, } } @@ -423,6 +429,7 @@ where runtime: Arc, key_store: &BeefyKeystore, metrics: &Option, + is_authority: bool, ) -> Result, Error> { // Initialize voter state from AUX DB if compatible. if let Some(mut state) = crate::aux_schema::load_persistent(backend.as_ref())? 
@@ -455,7 +462,13 @@ where "🥩 Handling missed BEEFY session after node restart: {:?}.", new_session_start ); - state.init_session_at(new_session_start, validator_set, key_store, metrics); + state.init_session_at( + new_session_start, + validator_set, + key_store, + metrics, + is_authority, + ); } return Ok(state) } @@ -491,6 +504,7 @@ pub async fn start_beefy_gadget( prometheus_registry, links, mut on_demand_justifications_handler, + is_authority, } = beefy_params; let BeefyNetworkParams { @@ -553,6 +567,7 @@ pub async fn start_beefy_gadget( min_block_delta, beefy_comms.gossip_validator.clone(), &mut finality_notifications, + is_authority, ).fuse() => { match builder_init_result { Ok(builder) => break builder, @@ -580,6 +595,7 @@ pub async fn start_beefy_gadget( beefy_comms, links.clone(), BTreeMap::new(), + is_authority, ); match futures::future::select( diff --git a/substrate/client/consensus/beefy/src/tests.rs b/substrate/client/consensus/beefy/src/tests.rs index d106c9dcd88..aecfec7b9ed 100644 --- a/substrate/client/consensus/beefy/src/tests.rs +++ b/substrate/client/consensus/beefy/src/tests.rs @@ -379,6 +379,7 @@ async fn voter_init_setup( Arc::new(api.clone()), &key_store, &metrics, + true, ) .await } @@ -438,6 +439,7 @@ where min_block_delta, prometheus_registry: None, on_demand_justifications_handler: on_demand_justif_handler, + is_authority: true, }; let task = crate::start_beefy_gadget::<_, _, _, _, _, _, _>(beefy_params); diff --git a/substrate/client/consensus/beefy/src/worker.rs b/substrate/client/consensus/beefy/src/worker.rs index 7a47f286ef7..ac6b72d1ea4 100644 --- a/substrate/client/consensus/beefy/src/worker.rs +++ b/substrate/client/consensus/beefy/src/worker.rs @@ -33,7 +33,7 @@ use crate::{ }; use codec::{Codec, Decode, DecodeAll, Encode}; use futures::{stream::Fuse, FutureExt, StreamExt}; -use log::{debug, error, info, log_enabled, trace, warn}; +use log::{debug, error, info, trace, warn}; use sc_client_api::{Backend, FinalityNotification, FinalityNotifications, HeaderBackend}; use sc_utils::notification::NotificationReceiver; use sp_api::ProvideRuntimeApi; @@ -51,7 +51,7 @@ use sp_runtime::{ SaturatedConversion, }; use std::{ - collections::{BTreeMap, BTreeSet, VecDeque}, + collections::{BTreeMap, VecDeque}, fmt::Debug, sync::Arc, }; @@ -332,6 +332,7 @@ impl PersistedState { validator_set: ValidatorSet, key_store: &BeefyKeystore, metrics: &Option, + is_authority: bool, ) { debug!(target: LOG_TARGET, "🥩 New active validator set: {:?}", validator_set); @@ -348,11 +349,16 @@ impl PersistedState { } } - if log_enabled!(target: LOG_TARGET, log::Level::Debug) { - // verify the new validator set - only do it if we're also logging the warning - if verify_validator_set::(&new_session_start, &validator_set, key_store).is_err() { - metric_inc!(metrics, beefy_no_authority_found_in_store); - } + // verify we have some BEEFY key available in keystore when role is authority. 
+ if is_authority && key_store.public_keys().map_or(false, |k| k.is_empty()) { + error!( + target: LOG_TARGET, + "🥩 for session starting at block {:?} no BEEFY authority key found in store, \ + you must generate valid session keys \ + (https://wiki.polkadot.network/docs/maintain-guides-how-to-validate-polkadot#generating-the-session-keys)", + new_session_start, + ); + metric_inc!(metrics, beefy_no_authority_found_in_store); } let id = validator_set.id(); @@ -390,6 +396,8 @@ pub(crate) struct BeefyWorker { pub persisted_state: PersistedState, /// BEEFY voter metrics pub metrics: Option, + /// Node runs under "Authority" role. + pub is_authority: bool, } impl BeefyWorker @@ -425,6 +433,7 @@ where validator_set, &self.key_store, &self.metrics, + self.is_authority, ); } @@ -1040,33 +1049,6 @@ where } } -/// Verify `active` validator set for `block` against the key store -/// -/// We want to make sure that we have _at least one_ key in our keystore that -/// is part of the validator set, that's because if there are no local keys -/// then we can't perform our job as a validator. -/// -/// Note that for a non-authority node there will be no keystore, and we will -/// return an error and don't check. The error can usually be ignored. -fn verify_validator_set( - block: &NumberFor, - active: &ValidatorSet, - key_store: &BeefyKeystore, -) -> Result<(), Error> { - let active: BTreeSet<&AuthorityId> = active.validators().iter().collect(); - - let public_keys = key_store.public_keys()?; - let store: BTreeSet<&AuthorityId> = public_keys.iter().collect(); - - if store.intersection(&active).count() == 0 { - let msg = "no authority public key found in store".to_string(); - debug!(target: LOG_TARGET, "🥩 for block {:?} {}", block, msg); - Err(Error::Keystore(msg)) - } else { - Ok(()) - } -} - #[cfg(test)] pub(crate) mod tests { use super::*; @@ -1208,6 +1190,7 @@ pub(crate) mod tests { comms, pending_justifications: BTreeMap::new(), persisted_state, + is_authority: true, } } @@ -1471,32 +1454,6 @@ pub(crate) mod tests { assert_eq!(extracted, Some(validator_set)); } - #[tokio::test] - async fn keystore_vs_validator_set() { - let keys = &[Keyring::Alice]; - let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); - let mut net = BeefyTestNet::new(1); - let mut worker = create_beefy_worker(net.peer(0), &keys[0], 1, validator_set.clone()); - - // keystore doesn't contain other keys than validators' - assert_eq!(verify_validator_set::(&1, &validator_set, &worker.key_store), Ok(())); - - // unknown `Bob` key - let keys = &[Keyring::Bob]; - let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); - let err_msg = "no authority public key found in store".to_string(); - let expected = Err(Error::Keystore(err_msg)); - assert_eq!(verify_validator_set::(&1, &validator_set, &worker.key_store), expected); - - // worker has no keystore - worker.key_store = None.into(); - let expected_err = Err(Error::Keystore("no Keystore".into())); - assert_eq!( - verify_validator_set::(&1, &validator_set, &worker.key_store), - expected_err - ); - } - #[tokio::test] async fn should_finalize_correctly() { let keys = [Keyring::Alice]; -- GitLab From 7430f413503f8008fe60eb2e4ebd76d14af12ea9 Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Tue, 2 Apr 2024 16:12:34 +0300 Subject: [PATCH 082/128] chainHead: Allow methods to be called from within a single connection context and limit connections (#3481) This PR ensures that the chainHead RPC class can be called only 
from within the same connection context. The chainHead methods are now
registered as raw methods.
- https://github.com/paritytech/jsonrpsee/pull/1297

The concept of raw methods is introduced in jsonrpsee: a raw method is an
async method that exposes the connection ID. Raw methods don't have a blocking
variant; methods that were previously blocking now spawn a blocking task to
handle their blocking (i.e. DB) access. We spawn the same number of tasks as
before, however we now do so explicitly.

Another approach would be implementing an RPC middleware that captures and
decodes the method parameters:
- https://github.com/paritytech/polkadot-sdk/pull/3343
However, that approach is prone to errors since the methods are hardcoded by
name. Performance is affected by the double deserialization that needs to
happen to extract the subscription ID we'd like to limit: once from the
middleware, and once from the method itself.

This PR paves the way to implement the chainHead connection limiter:
- https://github.com/paritytech/polkadot-sdk/issues/1505
Registering tokens (subscription ID / operation ID) on the `RpcConnections`
could be extended to return an error when the maximum number of operations is
reached.

While at it, an integration test has been added to ensure that chainHead
methods can be called from within the same connection context.

Before this is merged, a new jsonrpsee release should be made to expose the
`raw-methods`:
- [x] Use jsonrpsee from crates io (blocked by:
https://github.com/paritytech/jsonrpsee/pull/1297)

Closes: https://github.com/paritytech/polkadot-sdk/issues/3207

cc @paritytech/subxt-team

---------

Signed-off-by: Alexandru Vasile
Co-authored-by: Niklas Adolfsson
---
 Cargo.lock                                    |  34 +-
 substrate/client/rpc-spec-v2/Cargo.toml       |   1 +
 .../client/rpc-spec-v2/src/chain_head/api.rs  |  30 +-
 .../rpc-spec-v2/src/chain_head/chain_head.rs  | 321 ++++++++++++------
 .../src/chain_head/chain_head_follow.rs       |  17 +-
 .../rpc-spec-v2/src/chain_head/error.rs       |   7 +
 .../src/chain_head/subscription/inner.rs      |  53 +++
 .../src/chain_head/subscription/mod.rs        | 132 ++++++-
 .../rpc-spec-v2/src/chain_head/tests.rs       | 230 ++++++++++++-
 .../rpc-spec-v2/src/common/connections.rs     | 262 ++++++++++++++
 .../client/rpc-spec-v2/src/common/mod.rs      |   1 +
 11 files changed, 934 insertions(+), 154 deletions(-)
 create mode 100644 substrate/client/rpc-spec-v2/src/common/connections.rs

diff --git a/Cargo.lock b/Cargo.lock
index 9a8eff4691e..24612391d3f 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6840,9 +6840,9 @@ checksum = "078e285eafdfb6c4b434e0d31e8cfcb5115b651496faca5749b88fafd4f23bfd"

[[package]]
name = "jsonrpsee"
-version = "0.22.0"
+version = "0.22.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4a95f7cc23d5fab0cdeeaf6bad8c8f5e7a3aa7f0d211957ea78232b327ab27b0"
+checksum = "87f3ae45a64cfc0882934f963be9431b2a165d667f53140358181f262aca0702"
dependencies = [
"jsonrpsee-core",
"jsonrpsee-http-client",
@@ -6856,9 +6856,9 @@ dependencies = [

[[package]]
name = "jsonrpsee-client-transport"
-version = "0.22.0"
+version = "0.22.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6b1736cfa3845fd9f8f43751f2b8e0e83f7b6081e754502f7d63b6587692cc83"
+checksum = "455fc882e56f58228df2aee36b88a1340eafd707c76af2fa68cf94b37d461131"
dependencies = [
"futures-util",
"http",
@@ -6877,9 +6877,9 @@ dependencies = [

[[package]]
name = "jsonrpsee-core"
-version = "0.22.0"
+version = "0.22.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = 
"82030d038658974732103e623ba2e0abec03bbbe175b39c0a2fafbada60c5868" +checksum = "b75568f4f9696e3a47426e1985b548e1a9fcb13372a5e320372acaf04aca30d1" dependencies = [ "anyhow", "async-lock 3.3.0", @@ -6903,9 +6903,9 @@ dependencies = [ [[package]] name = "jsonrpsee-http-client" -version = "0.22.0" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36a06ef0de060005fddf772d54597bb6a8b0413da47dcffd304b0306147b9678" +checksum = "9e7a95e346f55df84fb167b7e06470e196e7d5b9488a21d69c5d9732043ba7ba" dependencies = [ "async-trait", "hyper", @@ -6923,22 +6923,22 @@ dependencies = [ [[package]] name = "jsonrpsee-proc-macros" -version = "0.22.0" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69fc56131589f82e57805f7338b87023db4aafef813555708b159787e34ad6bc" +checksum = "30ca066e73dd70294aebc5c2675d8ffae43be944af027c857ce0d4c51785f014" dependencies = [ "heck 0.4.1", "proc-macro-crate 3.0.0", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.53", ] [[package]] name = "jsonrpsee-server" -version = "0.22.0" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d85be77fe5b2a94589e3164fb780017f7aff7d646b49278c0d0346af16975c8e" +checksum = "0e29c1bd1f9bba83c864977c73404e505f74f730fa0db89dd490ec174e36d7f0" dependencies = [ "futures-util", "http", @@ -6960,9 +6960,9 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.22.0" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a48fdc1202eafc51c63e00406575e59493284ace8b8b61aa16f3a6db5d64f1a" +checksum = "3467fd35feeee179f71ab294516bdf3a81139e7aeebdd860e46897c12e1a3368" dependencies = [ "anyhow", "beef", @@ -6973,9 +6973,9 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-client" -version = "0.22.0" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5ce25d70a8e4d3cc574bbc3cad0137c326ad64b194793d5e7bbdd3fa4504181" +checksum = "68ca71e74983f624c0cb67828e480a981586074da8ad3a2f214c6a3f884edab9" dependencies = [ "http", "jsonrpsee-client-transport", diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml index c62b3e789d3..937e5c6b626 100644 --- a/substrate/client/rpc-spec-v2/Cargo.toml +++ b/substrate/client/rpc-spec-v2/Cargo.toml @@ -44,6 +44,7 @@ futures-util = { version = "0.3.30", default-features = false } rand = "0.8.5" [dev-dependencies] +jsonrpsee = { version = "0.22", features = ["server", "ws-client"] } serde_json = { workspace = true, default-features = true } tokio = { version = "1.22.0", features = ["macros"] } substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/api.rs b/substrate/client/rpc-spec-v2/src/chain_head/api.rs index 00000e1fb27..3851adac264 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/api.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/api.rs @@ -27,7 +27,7 @@ use crate::{ common::events::StorageQuery, }; use jsonrpsee::{proc_macros::rpc, server::ResponsePayload}; -use sp_rpc::list::ListOrValue; +pub use sp_rpc::list::ListOrValue; #[rpc(client, server)] pub trait ChainHeadApi { @@ -54,8 +54,8 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. 
- #[method(name = "chainHead_unstable_body", blocking)] - fn chain_head_unstable_body( + #[method(name = "chainHead_unstable_body", raw_method)] + async fn chain_head_unstable_body( &self, follow_subscription: String, hash: Hash, @@ -73,8 +73,8 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_unstable_header", blocking)] - fn chain_head_unstable_header( + #[method(name = "chainHead_unstable_header", raw_method)] + async fn chain_head_unstable_header( &self, follow_subscription: String, hash: Hash, @@ -85,8 +85,8 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_unstable_storage", blocking)] - fn chain_head_unstable_storage( + #[method(name = "chainHead_unstable_storage", raw_method)] + async fn chain_head_unstable_storage( &self, follow_subscription: String, hash: Hash, @@ -99,8 +99,8 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_unstable_call", blocking)] - fn chain_head_unstable_call( + #[method(name = "chainHead_unstable_call", raw_method)] + async fn chain_head_unstable_call( &self, follow_subscription: String, hash: Hash, @@ -118,8 +118,8 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_unstable_unpin", blocking)] - fn chain_head_unstable_unpin( + #[method(name = "chainHead_unstable_unpin", raw_method)] + async fn chain_head_unstable_unpin( &self, follow_subscription: String, hash_or_hashes: ListOrValue, @@ -131,8 +131,8 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_unstable_continue", blocking)] - fn chain_head_unstable_continue( + #[method(name = "chainHead_unstable_continue", raw_method)] + async fn chain_head_unstable_continue( &self, follow_subscription: String, operation_id: String, @@ -145,8 +145,8 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_unstable_stopOperation", blocking)] - fn chain_head_unstable_stop_operation( + #[method(name = "chainHead_unstable_stopOperation", raw_method)] + async fn chain_head_unstable_stop_operation( &self, follow_subscription: String, operation_id: String, diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs index 2bda22b4523..975abbca4b6 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs @@ -34,10 +34,10 @@ use crate::{ hex_string, SubscriptionTaskExecutor, }; use codec::Encode; -use futures::future::FutureExt; +use futures::{channel::oneshot, future::FutureExt}; use jsonrpsee::{ - core::async_trait, server::ResponsePayload, types::SubscriptionId, MethodResponseFuture, - PendingSubscriptionSink, SubscriptionSink, + core::async_trait, server::ResponsePayload, types::SubscriptionId, ConnectionDetails, + MethodResponseFuture, PendingSubscriptionSink, SubscriptionSink, }; use log::debug; use sc_client_api::{ @@ -65,6 +65,8 @@ pub struct ChainHeadConfig { /// The maximum number of items reported by the `chainHead_storage` before /// pagination is required. 
pub operation_max_storage_items: usize, + /// The maximum number of `chainHead_follow` subscriptions per connection. + pub max_follow_subscriptions_per_connection: usize, } /// Maximum pinned blocks across all connections. @@ -86,6 +88,9 @@ const MAX_ONGOING_OPERATIONS: usize = 16; /// before paginations is required. const MAX_STORAGE_ITER_ITEMS: usize = 5; +/// The maximum number of `chainHead_follow` subscriptions per connection. +const MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION: usize = 4; + impl Default for ChainHeadConfig { fn default() -> Self { ChainHeadConfig { @@ -93,6 +98,7 @@ impl Default for ChainHeadConfig { subscription_max_pinned_duration: MAX_PINNED_DURATION, subscription_max_ongoing_operations: MAX_ONGOING_OPERATIONS, operation_max_storage_items: MAX_STORAGE_ITER_ITEMS, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, } } } @@ -106,7 +112,7 @@ pub struct ChainHead, Block: BlockT, Client> { /// Executor to spawn subscriptions. executor: SubscriptionTaskExecutor, /// Keep track of the pinned blocks for each subscription. - subscriptions: Arc>, + subscriptions: SubscriptionManagement, /// The maximum number of items reported by the `chainHead_storage` before /// pagination is required. operation_max_storage_items: usize, @@ -126,12 +132,13 @@ impl, Block: BlockT, Client> ChainHead { client, backend: backend.clone(), executor, - subscriptions: Arc::new(SubscriptionManagement::new( + subscriptions: SubscriptionManagement::new( config.global_max_pinned_blocks, config.subscription_max_pinned_duration, config.subscription_max_ongoing_operations, + config.max_follow_subscriptions_per_connection, backend, - )), + ), operation_max_storage_items: config.operation_max_storage_items, _phantom: PhantomData, } @@ -182,12 +189,23 @@ where let client = self.client.clone(); let fut = async move { + // Ensure the current connection ID has enough space to accept a new subscription. + let connection_id = pending.connection_id(); + // The RAII `reserved_subscription` will clean up resources on drop: + // - free the reserved subscription for the connection ID. + // - remove the subscription ID from the subscription management. + let Some(mut reserved_subscription) = subscriptions.reserve_subscription(connection_id) + else { + pending.reject(ChainHeadRpcError::ReachedLimits).await; + return + }; + let Ok(sink) = pending.accept().await else { return }; let sub_id = read_subscription_id_as_string(&sink); - // Keep track of the subscription. - let Some(sub_data) = subscriptions.insert_subscription(sub_id.clone(), with_runtime) + let Some(sub_data) = + reserved_subscription.insert_subscription(sub_id.clone(), with_runtime) else { // Inserting the subscription can only fail if the JsonRPSee // generated a duplicate subscription ID. 
@@ -201,91 +219,117 @@ where let mut chain_head_follow = ChainHeadFollower::new( client, backend, - subscriptions.clone(), + subscriptions, with_runtime, sub_id.clone(), ); chain_head_follow.generate_events(sink, sub_data).await; - subscriptions.remove_subscription(&sub_id); debug!(target: LOG_TARGET, "[follow][id={:?}] Subscription removed", sub_id); }; self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); } - fn chain_head_unstable_body( + async fn chain_head_unstable_body( &self, + connection_details: ConnectionDetails, follow_subscription: String, hash: Block::Hash, ) -> ResponsePayload<'static, MethodResponse> { - let mut block_guard = match self.subscriptions.lock_block(&follow_subscription, hash, 1) { - Ok(block) => block, - Err(SubscriptionManagementError::SubscriptionAbsent) | - Err(SubscriptionManagementError::ExceededLimits) => - return ResponsePayload::success(MethodResponse::LimitReached), - Err(SubscriptionManagementError::BlockHashAbsent) => { - // Block is not part of the subscription. - return ResponsePayload::error(ChainHeadRpcError::InvalidBlock); - }, - Err(_) => return ResponsePayload::error(ChainHeadRpcError::InvalidBlock), - }; + if !self + .subscriptions + .contains_subscription(connection_details.id(), &follow_subscription) + { + // The spec says to return `LimitReached` if the follow subscription is invalid or + // stale. + return ResponsePayload::success(MethodResponse::LimitReached); + } - let operation_id = block_guard.operation().operation_id(); + let client = self.client.clone(); + let subscriptions = self.subscriptions.clone(); + let executor = self.executor.clone(); + + let result = spawn_blocking(&self.executor, async move { + let mut block_guard = match subscriptions.lock_block(&follow_subscription, hash, 1) { + Ok(block) => block, + Err(SubscriptionManagementError::SubscriptionAbsent) | + Err(SubscriptionManagementError::ExceededLimits) => + return ResponsePayload::success(MethodResponse::LimitReached), + Err(SubscriptionManagementError::BlockHashAbsent) => { + // Block is not part of the subscription. + return ResponsePayload::error(ChainHeadRpcError::InvalidBlock); + }, + Err(_) => return ResponsePayload::error(ChainHeadRpcError::InvalidBlock), + }; - let event = match self.client.block(hash) { - Ok(Some(signed_block)) => { - let extrinsics = signed_block - .block - .extrinsics() - .iter() - .map(|extrinsic| hex_string(&extrinsic.encode())) - .collect(); - FollowEvent::::OperationBodyDone(OperationBodyDone { + let operation_id = block_guard.operation().operation_id(); + + let event = match client.block(hash) { + Ok(Some(signed_block)) => { + let extrinsics = signed_block + .block + .extrinsics() + .iter() + .map(|extrinsic| hex_string(&extrinsic.encode())) + .collect(); + FollowEvent::::OperationBodyDone(OperationBodyDone { + operation_id: operation_id.clone(), + value: extrinsics, + }) + }, + Ok(None) => { + // The block's body was pruned. This subscription ID has become invalid. + debug!( + target: LOG_TARGET, + "[body][id={:?}] Stopping subscription because hash={:?} was pruned", + &follow_subscription, + hash + ); + subscriptions.remove_subscription(&follow_subscription); + return ResponsePayload::error(ChainHeadRpcError::InvalidBlock) + }, + Err(error) => FollowEvent::::OperationError(OperationError { operation_id: operation_id.clone(), - value: extrinsics, - }) - }, - Ok(None) => { - // The block's body was pruned. This subscription ID has become invalid. 
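Every handler rewritten in this hunk opens with the same connection-context guard before doing any work. A condensed sketch of the pattern, using a hypothetical `some_chain_head_method` (the `subscriptions` handle and the `LimitReached` mapping are exactly as in the surrounding code):

    async fn some_chain_head_method(
        &self,
        connection_details: ConnectionDetails,
        follow_subscription: String,
        // ...method-specific parameters...
    ) -> ResponsePayload<'static, MethodResponse> {
        // Reject subscription IDs created on another connection, or that no
        // longer exist; the spec maps both cases to `LimitReached`.
        if !self
            .subscriptions
            .contains_subscription(connection_details.id(), &follow_subscription)
        {
            return ResponsePayload::success(MethodResponse::LimitReached);
        }
        // ...method-specific logic...
        todo!()
    }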
- debug!( - target: LOG_TARGET, - "[body][id={:?}] Stopping subscription because hash={:?} was pruned", - &follow_subscription, - hash - ); - self.subscriptions.remove_subscription(&follow_subscription); - return ResponsePayload::error(ChainHeadRpcError::InvalidBlock) - }, - Err(error) => FollowEvent::::OperationError(OperationError { - operation_id: operation_id.clone(), - error: error.to_string(), - }), - }; + error: error.to_string(), + }), + }; - let (rp, rp_fut) = method_started_response(operation_id, None); + let (rp, rp_fut) = method_started_response(operation_id, None); + let fut = async move { + // Wait for the server to send out the response and if it produces an error no event + // should be generated. + if rp_fut.await.is_err() { + return; + } - let fut = async move { - // Events should only by generated - // if the response was successfully propagated. - if rp_fut.await.is_err() { - return; - } - let _ = block_guard.response_sender().unbounded_send(event); - }; + let _ = block_guard.response_sender().unbounded_send(event); + }; + executor.spawn_blocking("substrate-rpc-subscription", Some("rpc"), fut.boxed()); - self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); + rp + }); - rp + result + .await + .unwrap_or_else(|_| ResponsePayload::success(MethodResponse::LimitReached)) } - fn chain_head_unstable_header( + async fn chain_head_unstable_header( &self, + connection_details: ConnectionDetails, follow_subscription: String, hash: Block::Hash, ) -> Result, ChainHeadRpcError> { - let _block_guard = match self.subscriptions.lock_block(&follow_subscription, hash, 1) { + if !self + .subscriptions + .contains_subscription(connection_details.id(), &follow_subscription) + { + return Ok(None); + } + + let block_guard = match self.subscriptions.lock_block(&follow_subscription, hash, 1) { Ok(block) => block, Err(SubscriptionManagementError::SubscriptionAbsent) | Err(SubscriptionManagementError::ExceededLimits) => return Ok(None), @@ -296,19 +340,35 @@ where Err(_) => return Err(ChainHeadRpcError::InvalidBlock.into()), }; - self.client - .header(hash) - .map(|opt_header| opt_header.map(|h| hex_string(&h.encode()))) - .map_err(|err| ChainHeadRpcError::InternalError(err.to_string())) + let client = self.client.clone(); + let result = spawn_blocking(&self.executor, async move { + let _block_guard = block_guard; + + client + .header(hash) + .map(|opt_header| opt_header.map(|h| hex_string(&h.encode()))) + .map_err(|err| ChainHeadRpcError::InternalError(err.to_string())) + }); + result.await.unwrap_or_else(|_| Ok(None)) } - fn chain_head_unstable_storage( + async fn chain_head_unstable_storage( &self, + connection_details: ConnectionDetails, follow_subscription: String, hash: Block::Hash, items: Vec>, child_trie: Option, ) -> ResponsePayload<'static, MethodResponse> { + if !self + .subscriptions + .contains_subscription(connection_details.id(), &follow_subscription) + { + // The spec says to return `LimitReached` if the follow subscription is invalid or + // stale. + return ResponsePayload::success(MethodResponse::LimitReached); + } + // Gain control over parameter parsing and returned error. 
let items = match items .into_iter() @@ -357,25 +417,25 @@ where let mut items = items; items.truncate(num_operations); - let (rp, rp_is_success) = method_started_response(operation_id, Some(discarded)); - + let (rp, rp_fut) = method_started_response(operation_id, Some(discarded)); let fut = async move { - // Events should only by generated - // if the response was successfully propagated. - if rp_is_success.await.is_err() { + // Wait for the server to send out the response and if it produces an error no event + // should be generated. + if rp_fut.await.is_err() { return; } + storage_client.generate_events(block_guard, hash, items, child_trie).await; }; - self.executor .spawn_blocking("substrate-rpc-subscription", Some("rpc"), fut.boxed()); rp } - fn chain_head_unstable_call( + async fn chain_head_unstable_call( &self, + connection_details: ConnectionDetails, follow_subscription: String, hash: Block::Hash, function: String, @@ -386,6 +446,15 @@ where Err(err) => return ResponsePayload::error(err), }; + if !self + .subscriptions + .contains_subscription(connection_details.id(), &follow_subscription) + { + // The spec says to return `LimitReached` if the follow subscription is invalid or + // stale. + return ResponsePayload::success(MethodResponse::LimitReached); + } + let mut block_guard = match self.subscriptions.lock_block(&follow_subscription, hash, 1) { Ok(block) => block, Err(SubscriptionManagementError::SubscriptionAbsent) | @@ -408,44 +477,53 @@ where } let operation_id = block_guard.operation().operation_id(); - let event = self - .client - .executor() - .call(hash, &function, &call_parameters, CallContext::Offchain) - .map(|result| { - FollowEvent::::OperationCallDone(OperationCallDone { - operation_id: operation_id.clone(), - output: hex_string(&result), - }) - }) - .unwrap_or_else(|error| { - FollowEvent::::OperationError(OperationError { - operation_id: operation_id.clone(), - error: error.to_string(), - }) - }); - - let (rp, rp_fut) = method_started_response(operation_id, None); + let client = self.client.clone(); + let (rp, rp_fut) = method_started_response(operation_id.clone(), None); let fut = async move { - // Events should only by generated - // if the response was successfully propagated. + // Wait for the server to send out the response and if it produces an error no event + // should be generated. 
if rp_fut.await.is_err() { - return; + return } + + let event = client + .executor() + .call(hash, &function, &call_parameters, CallContext::Offchain) + .map(|result| { + FollowEvent::::OperationCallDone(OperationCallDone { + operation_id: operation_id.clone(), + output: hex_string(&result), + }) + }) + .unwrap_or_else(|error| { + FollowEvent::::OperationError(OperationError { + operation_id: operation_id.clone(), + error: error.to_string(), + }) + }); + let _ = block_guard.response_sender().unbounded_send(event); }; - - self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); + self.executor + .spawn_blocking("substrate-rpc-subscription", Some("rpc"), fut.boxed()); rp } - fn chain_head_unstable_unpin( + async fn chain_head_unstable_unpin( &self, + connection_details: ConnectionDetails, follow_subscription: String, hash_or_hashes: ListOrValue, ) -> Result<(), ChainHeadRpcError> { + if !self + .subscriptions + .contains_subscription(connection_details.id(), &follow_subscription) + { + return Ok(()); + } + let result = match hash_or_hashes { ListOrValue::Value(hash) => self.subscriptions.unpin_blocks(&follow_subscription, [hash]), @@ -469,11 +547,19 @@ where } } - fn chain_head_unstable_continue( + async fn chain_head_unstable_continue( &self, + connection_details: ConnectionDetails, follow_subscription: String, operation_id: String, ) -> Result<(), ChainHeadRpcError> { + if !self + .subscriptions + .contains_subscription(connection_details.id(), &follow_subscription) + { + return Ok(()) + } + let Some(operation) = self.subscriptions.get_operation(&follow_subscription, &operation_id) else { return Ok(()) @@ -487,11 +573,19 @@ where } } - fn chain_head_unstable_stop_operation( + async fn chain_head_unstable_stop_operation( &self, + connection_details: ConnectionDetails, follow_subscription: String, operation_id: String, ) -> Result<(), ChainHeadRpcError> { + if !self + .subscriptions + .contains_subscription(connection_details.id(), &follow_subscription) + { + return Ok(()) + } + let Some(operation) = self.subscriptions.get_operation(&follow_subscription, &operation_id) else { return Ok(()) @@ -510,3 +604,26 @@ fn method_started_response( let rp = MethodResponse::Started(MethodResponseStarted { operation_id, discarded_items }); ResponsePayload::success(rp).notify_on_completion() } + +/// Spawn a blocking future on the provided executor and return the result on a oneshot channel. +/// +/// This is a wrapper to extract the result of a `executor.spawn_blocking` future. +fn spawn_blocking( + executor: &SubscriptionTaskExecutor, + fut: impl std::future::Future + Send + 'static, +) -> oneshot::Receiver +where + R: Send + 'static, +{ + let (tx, rx) = oneshot::channel(); + + let blocking_fut = async move { + let result = fut.await; + // Send the result back on the channel. + let _ = tx.send(result); + }; + + executor.spawn_blocking("substrate-rpc-subscription", Some("rpc"), blocking_fut.boxed()); + + rx +} diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs index afa99f3aa16..90cc62a36fa 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs @@ -60,7 +60,7 @@ pub struct ChainHeadFollower, Block: BlockT, Client> { /// Backend of the chain. backend: Arc, /// Subscriptions handle. - sub_handle: Arc>, + sub_handle: SubscriptionManagement, /// Subscription was started with the runtime updates flag. 
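The `spawn_blocking` helper defined at the end of this file hands the future to the subscription executor and returns a oneshot receiver, so handlers can await the offloaded result without blocking the RPC thread. A minimal usage sketch; `expensive_lookup` is a hypothetical stand-in for the offloaded client call and is assumed to return the prepared `ResponsePayload`:

    let result = spawn_blocking(&self.executor, async move {
        // Potentially blocking client work runs off the RPC thread.
        expensive_lookup(hash)
    });
    // If the spawned task is dropped before it sends, the receiver resolves to
    // `Err(Canceled)`; the handlers above then fall back to `LimitReached`.
    result
        .await
        .unwrap_or_else(|_| ResponsePayload::success(MethodResponse::LimitReached))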
 	with_runtime: bool,
 	/// Subscription ID.
@@ -74,7 +74,7 @@ impl, Block: BlockT, Client> ChainHeadFollower,
 		backend: Arc,
-		sub_handle: Arc>,
+		sub_handle: SubscriptionManagement,
 		with_runtime: bool,
 		sub_id: String,
 	) -> Self {
@@ -546,7 +546,12 @@ where
 		EventStream: Stream> + Unpin,
 	{
 		let mut stream_item = stream.next();
-		let mut stop_event = rx_stop;
+
+		// The stop event can be triggered by the chainHead logic when the pinned
+		// block guarantee cannot be held, or when the client disconnects.
+		let connection_closed = sink.closed();
+		tokio::pin!(connection_closed);
+		let mut stop_event = futures_util::future::select(rx_stop, connection_closed);
 
 		while let Either::Left((Some(event), next_stop_event)) =
 			futures_util::future::select(stream_item, stop_event).await
@@ -594,8 +599,10 @@ where
 			stop_event = next_stop_event;
 		}
 
-		// If we got here either the substrate streams have closed
-		// or the `Stop` receiver was triggered.
+		// If we got here, either:
+		// - the substrate streams have closed
+		// - the `Stop` receiver was triggered internally (cannot hold the pinned block guarantee)
+		// - the client disconnected.
 		let msg = to_sub_message(&sink, &FollowEvent::::Stop);
 		let _ = sink.send(msg).await;
 	}
diff --git a/substrate/client/rpc-spec-v2/src/chain_head/error.rs b/substrate/client/rpc-spec-v2/src/chain_head/error.rs
index 8c50e445aa0..35604db0660 100644
--- a/substrate/client/rpc-spec-v2/src/chain_head/error.rs
+++ b/substrate/client/rpc-spec-v2/src/chain_head/error.rs
@@ -23,6 +23,9 @@ use jsonrpsee::types::error::ErrorObject;
 /// ChainHead RPC errors.
 #[derive(Debug, thiserror::Error)]
 pub enum Error {
+	/// The maximum number of `chainHead_follow` subscriptions has been reached.
+	#[error("Maximum number of chainHead_follow has been reached")]
+	ReachedLimits,
 	/// The provided block hash is invalid.
 	#[error("Invalid block hash")]
 	InvalidBlock,
@@ -46,6 +49,8 @@ pub enum Error {
 /// Errors for `chainHead` RPC module, as defined in
 /// .
 pub mod rpc_spec_v2 {
+	/// The maximum number of `chainHead_follow` subscriptions has been reached.
+	pub const REACHED_LIMITS: i32 = -32800;
 	/// The provided block hash is invalid.
 	pub const INVALID_BLOCK_ERROR: i32 = -32801;
 	/// The follow subscription was started with `withRuntime` set to `false`.
@@ -70,6 +75,8 @@ impl From for ErrorObject<'static> {
 		let msg = e.to_string();
 
 		match e {
+			Error::ReachedLimits =>
+				ErrorObject::owned(rpc_spec_v2::REACHED_LIMITS, msg, None::<()>),
 			Error::InvalidBlock =>
 				ErrorObject::owned(rpc_spec_v2::INVALID_BLOCK_ERROR, msg, None::<()>),
 			Error::InvalidRuntimeCall(_) =>
diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs
index d2879679501..1ebee3c80fc 100644
--- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs
+++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs
@@ -1455,4 +1455,57 @@ mod tests {
 		let permit_three = ops.reserve_at_most(1).unwrap();
 		assert_eq!(permit_three.num_ops, 1);
 	}
+
+	#[test]
+	fn reserved_subscription_cleans_resources() {
+		let builder = TestClientBuilder::new();
+		let backend = builder.backend();
+		let subs = Arc::new(parking_lot::RwLock::new(SubscriptionsInner::new(
+			10,
+			Duration::from_secs(10),
+			MAX_OPERATIONS_PER_SUB,
+			backend,
+		)));
+
+		// Maximum 2 subscriptions per connection.
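For reference, the new `ReachedLimits` variant surfaces to clients as JSON-RPC error `-32800`. A small sketch of the mapping exercised by the `From` impl above, with the code and message taken from this diff:

    use jsonrpsee::types::error::ErrorObject;

    let err: ErrorObject<'static> = Error::ReachedLimits.into();
    assert_eq!(err.code(), -32800); // rpc_spec_v2::REACHED_LIMITS
    assert_eq!(err.message(), "Maximum number of chainHead_follow has been reached");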
+		let rpc_connections = crate::common::connections::RpcConnections::new(2);
+
+		let subscription_management =
+			crate::chain_head::subscription::SubscriptionManagement::_from_inner(
+				subs.clone(),
+				rpc_connections.clone(),
+			);
+
+		let reserved_sub_first = subscription_management.reserve_subscription(1).unwrap();
+		let mut reserved_sub_second = subscription_management.reserve_subscription(1).unwrap();
+		// Subscriptions reserved but not yet populated.
+		assert_eq!(subs.read().subs.len(), 0);
+
+		// Cannot reserve anymore.
+		assert!(subscription_management.reserve_subscription(1).is_none());
+		// Drop the first subscription.
+		drop(reserved_sub_first);
+		// Space is freed up for the rpc connections.
+		let mut reserved_sub_first = subscription_management.reserve_subscription(1).unwrap();
+
+		// Insert subscriptions.
+		let _sub_data_first =
+			reserved_sub_first.insert_subscription("sub1".to_string(), true).unwrap();
+		let _sub_data_second =
+			reserved_sub_second.insert_subscription("sub2".to_string(), true).unwrap();
+		// Check we have 2 subscriptions under management.
+		assert_eq!(subs.read().subs.len(), 2);
+
+		// Drop first reserved subscription.
+		drop(reserved_sub_first);
+		// Check that the subscription is removed.
+		assert_eq!(subs.read().subs.len(), 1);
+		// Space is freed up for the rpc connections.
+		let reserved_sub_first = subscription_management.reserve_subscription(1).unwrap();
+
+		// Drop all subscriptions.
+		drop(reserved_sub_first);
+		drop(reserved_sub_second);
+		assert_eq!(subs.read().subs.len(), 0);
+	}
 }
diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs
index c830e662da2..5b016af1aa4 100644
--- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs
+++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs
@@ -16,6 +16,7 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see .
 
+use jsonrpsee::ConnectionId;
 use parking_lot::RwLock;
 use sc_client_api::Backend;
 use sp_runtime::traits::Block as BlockT;
@@ -24,6 +25,11 @@ use std::{sync::Arc, time::Duration};
 mod error;
 mod inner;
 
+use crate::{
+	chain_head::chain_head::LOG_TARGET,
+	common::connections::{RegisteredConnection, ReservedConnection, RpcConnections},
+};
+
 use self::inner::SubscriptionsInner;
 
 pub use self::inner::OperationState;
@@ -34,7 +40,22 @@ pub use inner::{BlockGuard, InsertedSubscriptionData};
 pub struct SubscriptionManagement> {
 	/// Manage subscription by mapping the subscription ID
 	/// to a set of block hashes.
-	inner: RwLock>,
+	inner: Arc>>,
+
+	/// Ensures that chainHead methods can be called from a single connection context.
+	///
+	/// For example, `chainHead_storage` cannot be called with a subscription ID that
+	/// was obtained from a different connection.
+	rpc_connections: RpcConnections,
+}
+
+impl> Clone for SubscriptionManagement {
+	fn clone(&self) -> Self {
+		SubscriptionManagement {
+			inner: self.inner.clone(),
+			rpc_connections: self.rpc_connections.clone(),
+		}
+	}
 }
 
 impl> SubscriptionManagement {
@@ -43,30 +64,55 @@ impl> SubscriptionManagement {
 		global_max_pinned_blocks: usize,
 		local_max_pin_duration: Duration,
 		max_ongoing_operations: usize,
+		max_follow_subscriptions_per_connection: usize,
 		backend: Arc,
 	) -> Self {
 		SubscriptionManagement {
-			inner: RwLock::new(SubscriptionsInner::new(
+			inner: Arc::new(RwLock::new(SubscriptionsInner::new(
 				global_max_pinned_blocks,
 				local_max_pin_duration,
 				max_ongoing_operations,
 				backend,
-			)),
+			))),
+			rpc_connections: RpcConnections::new(max_follow_subscriptions_per_connection),
 		}
 	}
 
-	/// Insert a new subscription ID.
+	/// Create a new instance from the inner state.
 	///
-	/// If the subscription was not previously inserted, returns the receiver that is
-	/// triggered upon the "Stop" event. Otherwise, if the subscription ID was already
-	/// inserted returns none.
-	pub fn insert_subscription(
+	/// # Note
+	///
+	/// Used for testing.
+	#[cfg(test)]
+	pub(crate) fn _from_inner(
+		inner: Arc>>,
+		rpc_connections: RpcConnections,
+	) -> Self {
+		SubscriptionManagement { inner, rpc_connections }
+	}
+
+	/// Reserve space for a subscription.
+	///
+	/// Fails if the connection ID has reached the maximum number of active subscriptions.
+	pub fn reserve_subscription(
 		&self,
-		sub_id: String,
-		runtime_updates: bool,
-	) -> Option> {
-		let mut inner = self.inner.write();
-		inner.insert_subscription(sub_id, runtime_updates)
+		connection_id: ConnectionId,
+	) -> Option> {
+		let reserved_token = self.rpc_connections.reserve_space(connection_id)?;
+
+		Some(ReservedSubscription {
+			state: ConnectionState::Reserved(reserved_token),
+			inner: self.inner.clone(),
+		})
+	}
+
+	/// Check if the given connection contains the given subscription.
+	pub fn contains_subscription(
+		&self,
+		connection_id: ConnectionId,
+		subscription_id: &str,
+	) -> bool {
+		self.rpc_connections.contains_identifier(connection_id, subscription_id)
 	}
 
 	/// Remove the subscription ID with associated pinned blocks.
@@ -136,3 +182,63 @@ impl> SubscriptionManagement {
 		inner.get_operation(sub_id, operation_id)
 	}
 }
+
+/// The state of the connection.
+///
+/// The state starts as [`ConnectionState::Reserved`] and transitions to
+/// [`ConnectionState::Registered`] when the subscription is inserted.
+enum ConnectionState {
+	Reserved(ReservedConnection),
+	Registered { _unregister_on_drop: RegisteredConnection, sub_id: String },
+	Empty,
+}
+
+/// RAII wrapper that removes the subscription from internal mappings and
+/// gives back the reserved space for the connection.
+pub struct ReservedSubscription> {
+	state: ConnectionState,
+	inner: Arc>>,
+}
+
+impl> ReservedSubscription {
+	/// Insert a new subscription ID.
+	///
+	/// If the subscription was not previously inserted, returns the receiver that is
+	/// triggered upon the "Stop" event. Otherwise, if the subscription ID was already
+	/// inserted, returns `None`.
+	///
+	/// # Note
+	///
+	/// This method should be called only once.
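Taken together, the intended lifecycle is reserve, then register, with the RAII guard rolling either step back on drop. A condensed sketch of the happy path as the follow handler drives it (error handling elided; `connection_id`, `sub_id` and `with_runtime` come from the surrounding handler):

    // Claim one of the connection's follow slots, or reject with `ReachedLimits`.
    let Some(mut reserved) = subscriptions.reserve_subscription(connection_id) else {
        return; // pending.reject(ChainHeadRpcError::ReachedLimits)
    };
    // Bind the accepted subscription ID to the reserved slot.
    let Some(sub_data) = reserved.insert_subscription(sub_id.clone(), with_runtime) else {
        return; // duplicate ID: only possible on a jsonrpsee bug
    };
    // ...generate follow events...
    // Dropping `reserved` removes the subscription and frees the slot.
    drop(reserved);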
+ pub fn insert_subscription( + &mut self, + sub_id: String, + runtime_updates: bool, + ) -> Option> { + match std::mem::replace(&mut self.state, ConnectionState::Empty) { + ConnectionState::Reserved(reserved) => { + let registered_token = reserved.register(sub_id.clone())?; + self.state = ConnectionState::Registered { + _unregister_on_drop: registered_token, + sub_id: sub_id.clone(), + }; + + let mut inner = self.inner.write(); + inner.insert_subscription(sub_id, runtime_updates) + }, + // Cannot insert multiple subscriptions into one single reserved space. + ConnectionState::Registered { .. } | ConnectionState::Empty => { + log::error!(target: LOG_TARGET, "Called insert_subscription on a connection that is not reserved"); + None + }, + } + } +} + +impl> Drop for ReservedSubscription { + fn drop(&mut self) { + if let ConnectionState::Registered { sub_id, .. } = &self.state { + self.inner.write().remove_subscription(sub_id); + } + } +} diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index 30152efb5b6..c3f10a201c5 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -17,7 +17,7 @@ // along with this program. If not, see . use crate::{ - chain_head::{event::MethodResponse, test_utils::ChainHeadMockClient}, + chain_head::{api::ChainHeadApiClient, event::MethodResponse, test_utils::ChainHeadMockClient}, common::events::{StorageQuery, StorageQueryType, StorageResultType}, hex_string, }; @@ -27,8 +27,12 @@ use assert_matches::assert_matches; use codec::{Decode, Encode}; use futures::Future; use jsonrpsee::{ - core::server::Subscription as RpcSubscription, rpc_params, MethodsError as Error, RpcModule, + core::{ + client::Subscription as RpcClientSubscription, server::Subscription as RpcSubscription, + }, + rpc_params, MethodsError as Error, RpcModule, }; + use sc_block_builder::BlockBuilderBuilder; use sc_client_api::ChildInfo; use sc_service::client::new_in_mem; @@ -59,6 +63,8 @@ const MAX_PINNED_BLOCKS: usize = 32; const MAX_PINNED_SECS: u64 = 60; const MAX_OPERATIONS: usize = 16; const MAX_PAGINATION_LIMIT: usize = 5; +const MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION: usize = 4; + const INVALID_HASH: [u8; 32] = [1; 32]; const KEY: &[u8] = b":mock"; const VALUE: &[u8] = b"hello world"; @@ -66,6 +72,35 @@ const CHILD_STORAGE_KEY: &[u8] = b"child"; const CHILD_VALUE: &[u8] = b"child value"; const DOES_NOT_PRODUCE_EVENTS_SECONDS: u64 = 10; +/// Start an RPC server with the chainHead module. 
+pub async fn run_server() -> std::net::SocketAddr { + let builder = TestClientBuilder::new(); + let backend = builder.backend(); + let client = Arc::new(builder.build()); + + let api = ChainHead::new( + client, + backend, + Arc::new(TaskExecutor::default()), + ChainHeadConfig { + global_max_pinned_blocks: MAX_PINNED_BLOCKS, + subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), + subscription_max_ongoing_operations: MAX_OPERATIONS, + operation_max_storage_items: MAX_PAGINATION_LIMIT, + max_follow_subscriptions_per_connection: 1, + }, + ) + .into_rpc(); + + let server = jsonrpsee::server::ServerBuilder::default().build("127.0.0.1:0").await.unwrap(); + + let addr = server.local_addr().unwrap(); + let handle = server.start(api); + + tokio::spawn(handle.stopped()); + addr +} + async fn get_next_event(sub: &mut RpcSubscription) -> T { let (event, _sub_id) = tokio::time::timeout(std::time::Duration::from_secs(60), sub.next()) .await @@ -113,6 +148,7 @@ async fn setup_api() -> ( subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -163,6 +199,7 @@ async fn follow_subscription_produces_blocks() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -231,6 +268,7 @@ async fn follow_with_runtime() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -543,6 +581,7 @@ async fn call_runtime_without_flag() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -1201,6 +1240,7 @@ async fn separate_operation_ids_for_subscriptions() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -1289,6 +1329,7 @@ async fn follow_generates_initial_blocks() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -1444,6 +1485,7 @@ async fn follow_exceeding_pinned_blocks() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -1520,6 +1562,7 @@ async fn follow_with_unpin() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: 
MAX_PAGINATION_LIMIT, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -1631,6 +1674,7 @@ async fn unpin_duplicate_hashes() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -1733,6 +1777,7 @@ async fn follow_with_multiple_unpin_hashes() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -1886,6 +1931,7 @@ async fn follow_prune_best_block() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -2071,6 +2117,7 @@ async fn follow_forks_pruned_block() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -2230,6 +2277,7 @@ async fn follow_report_multiple_pruned_block() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -2475,6 +2523,7 @@ async fn pin_block_references() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -2612,6 +2661,7 @@ async fn follow_finalized_before_new_block() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -2726,6 +2776,7 @@ async fn ensure_operation_limits_works() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: 1, operation_max_storage_items: MAX_PAGINATION_LIMIT, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -2830,6 +2881,7 @@ async fn check_continue_operation() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: 1, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -3012,6 +3064,7 @@ async fn stop_storage_operation() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: 1, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -3297,3 +3350,176 @@ async fn storage_closest_merkle_value() { 
merkle_values_rhs.get(&hex_string(b":AAAA")).unwrap() ); } + +#[tokio::test] +async fn chain_head_single_connection_context() { + let server_addr = run_server().await; + let server_url = format!("ws://{}", server_addr); + let client = jsonrpsee::ws_client::WsClientBuilder::default() + .build(&server_url) + .await + .unwrap(); + // Calls cannot be made from a different connection context. + let second_client = jsonrpsee::ws_client::WsClientBuilder::default() + .build(&server_url) + .await + .unwrap(); + + let mut sub: RpcClientSubscription> = + ChainHeadApiClient::::chain_head_unstable_follow(&client, true) + .await + .unwrap(); + + let event = tokio::time::timeout(std::time::Duration::from_secs(60), sub.next()) + .await + .unwrap() + .unwrap() + .unwrap(); + let finalized_hash = match event { + FollowEvent::Initialized(init) => init.finalized_block_hashes.into_iter().last().unwrap(), + _ => panic!("Expected FollowEvent::Initialized"), + }; + + let first_sub_id = match sub.kind() { + jsonrpsee::core::client::SubscriptionKind::Subscription(id) => match id { + jsonrpsee::types::SubscriptionId::Num(num) => num.to_string(), + jsonrpsee::types::SubscriptionId::Str(s) => s.to_string(), + }, + _ => panic!("Unexpected subscription ID"), + }; + + // Trying to unpin from a different connection will have no effect. + let _response = ChainHeadApiClient::::chain_head_unstable_unpin( + &second_client, + first_sub_id.clone(), + crate::chain_head::api::ListOrValue::Value(finalized_hash.clone()), + ) + .await + .unwrap(); + + // Body can still be fetched from the first subscription. + let response: MethodResponse = ChainHeadApiClient::::chain_head_unstable_body( + &client, + first_sub_id.clone(), + finalized_hash.clone(), + ) + .await + .unwrap(); + assert_matches!(response, MethodResponse::Started(_started)); + + // Cannot make a call from a different connection context. + let response: MethodResponse = ChainHeadApiClient::::chain_head_unstable_body( + &second_client, + first_sub_id.clone(), + finalized_hash.clone(), + ) + .await + .unwrap(); + assert_matches!(response, MethodResponse::LimitReached); + + let response: Option = ChainHeadApiClient::::chain_head_unstable_header( + &client, + first_sub_id.clone(), + finalized_hash.clone(), + ) + .await + .unwrap(); + assert!(response.is_some()); + // Cannot make a call from a different connection context. + let response: Option = ChainHeadApiClient::::chain_head_unstable_header( + &second_client, + first_sub_id.clone(), + finalized_hash.clone(), + ) + .await + .unwrap(); + assert!(response.is_none()); + + let key = hex_string(&KEY); + let response: MethodResponse = ChainHeadApiClient::::chain_head_unstable_storage( + &client, + first_sub_id.clone(), + finalized_hash.clone(), + vec![StorageQuery { key: key.clone(), query_type: StorageQueryType::Hash }], + None, + ) + .await + .unwrap(); + assert_matches!(response, MethodResponse::Started(_started)); + // Cannot make a call from a different connection context. + let response: MethodResponse = ChainHeadApiClient::::chain_head_unstable_storage( + &second_client, + first_sub_id.clone(), + finalized_hash.clone(), + vec![StorageQuery { key: key.clone(), query_type: StorageQueryType::Hash }], + None, + ) + .await + .unwrap(); + assert_matches!(response, MethodResponse::LimitReached); + + let alice_id = AccountKeyring::Alice.to_account_id(); + // Hex encoded scale encoded bytes representing the call parameters. 
+	let call_parameters = hex_string(&alice_id.encode());
+	let response: MethodResponse = ChainHeadApiClient::::chain_head_unstable_call(
+		&client,
+		first_sub_id.clone(),
+		finalized_hash.clone(),
+		"AccountNonceApi_account_nonce".into(),
+		call_parameters.clone(),
+	)
+	.await
+	.unwrap();
+	assert_matches!(response, MethodResponse::Started(_started));
+	// Cannot make a call from a different connection context.
+	let response: MethodResponse = ChainHeadApiClient::::chain_head_unstable_call(
+		&second_client,
+		first_sub_id.clone(),
+		finalized_hash.clone(),
+		"AccountNonceApi_account_nonce".into(),
+		call_parameters.clone(),
+	)
+	.await
+	.unwrap();
+	assert_matches!(response, MethodResponse::LimitReached);
+}
+
+#[tokio::test]
+async fn chain_head_limit_reached() {
+	let builder = TestClientBuilder::new();
+	let backend = builder.backend();
+	let client = Arc::new(builder.build());
+
+	// Maximum of 1 chainHead_follow subscription.
+	let api = ChainHead::new(
+		client.clone(),
+		backend,
+		Arc::new(TaskExecutor::default()),
+		ChainHeadConfig {
+			global_max_pinned_blocks: MAX_PINNED_BLOCKS,
+			subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS),
+			subscription_max_ongoing_operations: MAX_OPERATIONS,
+			operation_max_storage_items: MAX_PAGINATION_LIMIT,
+			max_follow_subscriptions_per_connection: 1,
+		},
+	)
+	.into_rpc();
+
+	let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap();
+	// Initialized must always be reported first.
+	let _event: FollowEvent = get_next_event(&mut sub).await;
+
+	let error = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap_err();
+	assert!(error
+		.to_string()
+		.contains("Maximum number of chainHead_follow has been reached"));
+
+	// After dropping the subscription, other subscriptions are allowed to be created.
+	drop(sub);
+	// Ensure the `chainHead_unfollow` call is propagated to the server.
+	tokio::time::sleep(std::time::Duration::from_secs(5)).await;
+
+	let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap();
+	// Initialized must always be reported first.
+	let _event: FollowEvent = get_next_event(&mut sub).await;
+}
diff --git a/substrate/client/rpc-spec-v2/src/common/connections.rs b/substrate/client/rpc-spec-v2/src/common/connections.rs
new file mode 100644
index 00000000000..c16a80bf49d
--- /dev/null
+++ b/substrate/client/rpc-spec-v2/src/common/connections.rs
@@ -0,0 +1,262 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see .
+
+use jsonrpsee::ConnectionId;
+use parking_lot::Mutex;
+use std::{
+	collections::{HashMap, HashSet},
+	sync::Arc,
+};
+
+/// Connection state which keeps track of whether a connection exists and
+/// the number of concurrent operations.
+#[derive(Default, Clone)]
+pub struct RpcConnections {
+	/// The number of identifiers that can be registered for each connection.
+	///
+	/// # Example
+	///
+	/// This is used to limit how many `chainHead_follow` subscriptions are active at one time.
+	capacity: usize,
+	/// Map the connection ID to a set of identifiers.
+	data: Arc>>,
+}
+
+#[derive(Default)]
+struct ConnectionData {
+	/// The total number of identifiers for the given connection.
+	///
+	/// An identifier for a connection might be:
+	/// - the subscription ID for chainHead_follow
+	/// - the operation ID for the transactionBroadcast API
+	/// - or simply how many times the transaction API has been called.
+	///
+	/// # Note
+	///
+	/// Because a pending subscription sink does not expose the future subscription ID,
+	/// we cannot register a subscription ID before the pending subscription is accepted.
+	/// This variable ensures that we have enough capacity to register an identifier, after
+	/// the subscription is accepted. Otherwise, a jsonrpc error object should be returned.
+	num_identifiers: usize,
+	/// Active registered identifiers for the given connection.
+	///
+	/// # Note
+	///
+	/// For chainHead, this represents the subscription ID.
+	/// For transactionBroadcast, this represents the operation ID.
+	/// For transaction, this is empty and the number of active calls is tracked by
+	/// [`Self::num_identifiers`].
+	identifiers: HashSet,
+}
+
+impl RpcConnections {
+	/// Constructs a new instance of [`RpcConnections`].
+	pub fn new(capacity: usize) -> Self {
+		RpcConnections { capacity, data: Default::default() }
+	}
+
+	/// Reserve space for a new connection identifier.
+	///
+	/// If the number of active identifiers for the given connection would exceed the capacity,
+	/// returns `None`.
+	pub fn reserve_space(&self, connection_id: ConnectionId) -> Option {
+		let mut data = self.data.lock();
+
+		let entry = data.entry(connection_id).or_insert_with(ConnectionData::default);
+		if entry.num_identifiers >= self.capacity {
+			return None;
+		}
+		entry.num_identifiers = entry.num_identifiers.saturating_add(1);
+
+		Some(ReservedConnection { connection_id, rpc_connections: Some(self.clone()) })
+	}
+
+	/// Gives back the reserved space before the connection identifier is registered.
+	///
+	/// # Note
+	///
+	/// This may happen if the pending subscription cannot be accepted (unlikely).
+	fn unreserve_space(&self, connection_id: ConnectionId) {
+		let mut data = self.data.lock();
+
+		let entry = data.entry(connection_id).or_insert_with(ConnectionData::default);
+		entry.num_identifiers = entry.num_identifiers.saturating_sub(1);
+
+		if entry.num_identifiers == 0 {
+			data.remove(&connection_id);
+		}
+	}
+
+	/// Register an identifier for the given connection.
+	///
+	/// Users must call [`Self::reserve_space`] before calling this method to ensure enough
+	/// space is available.
+	///
+	/// Returns `true` if the identifier was inserted successfully; `false` if it was
+	/// already inserted or capacity was reached.
+	fn register_identifier(&self, connection_id: ConnectionId, identifier: String) -> bool {
+		let mut data = self.data.lock();
+
+		let entry = data.entry(connection_id).or_insert_with(ConnectionData::default);
+		// Should have already been checked by `Self::reserve_space`.
+		if entry.identifiers.len() >= self.capacity {
+			return false;
+		}
+
+		entry.identifiers.insert(identifier)
+	}
+
+	/// Unregister an identifier for the given connection.
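Because a subscription ID is only known once the pending subscription has been accepted, the accounting is two-phased: `reserve_space` bumps the per-connection counter first, and `register_identifier` attaches the concrete ID afterwards. A compressed sketch of the lifecycle, matching the unit tests below (connection ID `1`, capacity `2`):

    let connections = RpcConnections::new(2);
    // Phase 1: take a slot before the subscription ID exists.
    let reserved = connections.reserve_space(1).expect("below capacity");
    // Phase 2: bind the ID once the subscription has been accepted.
    let registered = reserved.register("sub-1".to_string()).expect("ID is free");
    assert!(connections.contains_identifier(1, "sub-1"));
    // Dropping the guard unregisters the ID and releases the slot.
    drop(registered);
    assert!(!connections.contains_identifier(1, "sub-1"));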
+ fn unregister_identifier(&self, connection_id: ConnectionId, identifier: &str) { + let mut data = self.data.lock(); + if let Some(connection_data) = data.get_mut(&connection_id) { + connection_data.identifiers.remove(identifier); + connection_data.num_identifiers = connection_data.num_identifiers.saturating_sub(1); + + if connection_data.num_identifiers == 0 { + data.remove(&connection_id); + } + } + } + + /// Check if the given connection contains the given identifier. + pub fn contains_identifier(&self, connection_id: ConnectionId, identifier: &str) -> bool { + let data = self.data.lock(); + data.get(&connection_id) + .map(|connection_data| connection_data.identifiers.contains(identifier)) + .unwrap_or(false) + } +} + +/// RAII wrapper that ensures the reserved space is given back if the object is +/// dropped before the identifier is registered. +pub struct ReservedConnection { + connection_id: ConnectionId, + rpc_connections: Option, +} + +impl ReservedConnection { + /// Register the identifier for the given connection. + pub fn register(mut self, identifier: String) -> Option { + let rpc_connections = self.rpc_connections.take()?; + + if rpc_connections.register_identifier(self.connection_id, identifier.clone()) { + Some(RegisteredConnection { + connection_id: self.connection_id, + identifier, + rpc_connections, + }) + } else { + None + } + } +} + +impl Drop for ReservedConnection { + fn drop(&mut self) { + if let Some(rpc_connections) = self.rpc_connections.take() { + rpc_connections.unreserve_space(self.connection_id); + } + } +} + +/// RAII wrapper that ensures the identifier is unregistered if the object is dropped. +pub struct RegisteredConnection { + connection_id: ConnectionId, + identifier: String, + rpc_connections: RpcConnections, +} + +impl Drop for RegisteredConnection { + fn drop(&mut self) { + self.rpc_connections.unregister_identifier(self.connection_id, &self.identifier); + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn reserve_space() { + let rpc_connections = RpcConnections::new(2); + let reserved = rpc_connections.reserve_space(1); + assert!(reserved.is_some()); + assert_eq!(1, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + assert_eq!(rpc_connections.data.lock().len(), 1); + + let reserved = reserved.unwrap(); + let registered = reserved.register("identifier1".to_string()).unwrap(); + assert!(rpc_connections.contains_identifier(1, "identifier1")); + assert_eq!(1, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + drop(registered); + + // Data is dropped. + assert!(rpc_connections.data.lock().get(&1).is_none()); + assert!(rpc_connections.data.lock().is_empty()); + // Checks can still happen. + assert!(!rpc_connections.contains_identifier(1, "identifier1")); + } + + #[test] + fn reserve_space_capacity_reached() { + let rpc_connections = RpcConnections::new(2); + + // Reserve identifier for connection 1. + let reserved = rpc_connections.reserve_space(1); + assert!(reserved.is_some()); + assert_eq!(1, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + + // Add identifier for connection 1. + let reserved = reserved.unwrap(); + let registered = reserved.register("identifier1".to_string()).unwrap(); + assert!(rpc_connections.contains_identifier(1, "identifier1")); + assert_eq!(1, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + + // Reserve identifier for connection 1 again. 
+ let reserved = rpc_connections.reserve_space(1); + assert!(reserved.is_some()); + assert_eq!(2, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + + // Add identifier for connection 1 again. + let reserved = reserved.unwrap(); + let registered_second = reserved.register("identifier2".to_string()).unwrap(); + assert!(rpc_connections.contains_identifier(1, "identifier2")); + assert_eq!(2, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + + // Cannot reserve more identifiers. + let reserved = rpc_connections.reserve_space(1); + assert!(reserved.is_none()); + + // Drop the first identifier. + drop(registered); + assert_eq!(1, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + assert!(rpc_connections.contains_identifier(1, "identifier2")); + assert!(!rpc_connections.contains_identifier(1, "identifier1")); + + // Can reserve again after clearing the space. + let reserved = rpc_connections.reserve_space(1); + assert!(reserved.is_some()); + assert_eq!(2, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + + // Ensure data is cleared. + drop(reserved); + drop(registered_second); + assert!(rpc_connections.data.lock().get(&1).is_none()); + } +} diff --git a/substrate/client/rpc-spec-v2/src/common/mod.rs b/substrate/client/rpc-spec-v2/src/common/mod.rs index ac1af8fce3c..3167561d649 100644 --- a/substrate/client/rpc-spec-v2/src/common/mod.rs +++ b/substrate/client/rpc-spec-v2/src/common/mod.rs @@ -13,5 +13,6 @@ //! Common types and functionality for the RPC-V2 spec. +pub mod connections; pub mod events; pub mod storage; -- GitLab From 8e95a3e1aa9c1c7ec5ca468f46714f1a5eff2485 Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Tue, 2 Apr 2024 16:41:01 +0300 Subject: [PATCH 083/128] Align dependencies with `parity-bridges-common` (#3937) Working towards migrating the `parity-bridges-common` repo inside `polkadot-sdk`. 
This PR upgrades some dependencies in order to align them with the versions used in `parity-bridges-common` Related to https://github.com/paritytech/parity-bridges-common/issues/2538 --- Cargo.lock | 116 +++++++++++------- .../outbound-queue/merkle-tree/Cargo.toml | 2 +- .../snowbridge/runtime/test-common/Cargo.toml | 2 +- cumulus/client/collator/Cargo.toml | 4 +- cumulus/client/consensus/aura/Cargo.toml | 2 +- cumulus/client/consensus/common/Cargo.toml | 2 +- cumulus/client/consensus/proposer/Cargo.toml | 2 +- .../client/consensus/relay-chain/Cargo.toml | 2 +- cumulus/client/network/Cargo.toml | 2 +- cumulus/client/parachain-inherent/Cargo.toml | 4 +- cumulus/client/pov-recovery/Cargo.toml | 2 +- .../Cargo.toml | 2 +- .../client/relay-chain-interface/Cargo.toml | 2 +- .../relay-chain-minimal-node/Cargo.toml | 2 +- .../relay-chain-rpc-interface/Cargo.toml | 2 +- cumulus/pallets/aura-ext/Cargo.toml | 2 +- cumulus/pallets/collator-selection/Cargo.toml | 2 +- cumulus/pallets/dmp-queue/Cargo.toml | 2 +- cumulus/pallets/parachain-system/Cargo.toml | 2 +- cumulus/pallets/solo-to-para/Cargo.toml | 2 +- cumulus/pallets/xcm/Cargo.toml | 2 +- cumulus/pallets/xcmp-queue/Cargo.toml | 2 +- cumulus/parachains/common/Cargo.toml | 2 +- .../bridges/bridge-hub-rococo/Cargo.toml | 2 +- .../pallets/collective-content/Cargo.toml | 2 +- .../pallets/parachain-info/Cargo.toml | 2 +- cumulus/parachains/pallets/ping/Cargo.toml | 2 +- .../assets/asset-hub-rococo/Cargo.toml | 2 +- .../assets/asset-hub-westend/Cargo.toml | 2 +- .../runtimes/assets/common/Cargo.toml | 2 +- .../bridge-hubs/bridge-hub-rococo/Cargo.toml | 2 +- .../bridge-hubs/bridge-hub-westend/Cargo.toml | 2 +- .../runtimes/bridge-hubs/common/Cargo.toml | 2 +- .../collectives-westend/Cargo.toml | 2 +- .../contracts/contracts-rococo/Cargo.toml | 2 +- .../glutton/glutton-westend/Cargo.toml | 2 +- .../runtimes/starters/seedling/Cargo.toml | 2 +- .../runtimes/starters/shell/Cargo.toml | 2 +- .../runtimes/testing/penpal/Cargo.toml | 2 +- .../testing/rococo-parachain/Cargo.toml | 2 +- cumulus/polkadot-parachain/Cargo.toml | 2 +- cumulus/primitives/core/Cargo.toml | 2 +- .../primitives/parachain-inherent/Cargo.toml | 4 +- .../storage-weight-reclaim/Cargo.toml | 2 +- cumulus/test/runtime/Cargo.toml | 2 +- cumulus/test/service/Cargo.toml | 2 +- polkadot/Cargo.toml | 2 +- polkadot/cli/Cargo.toml | 2 +- polkadot/core-primitives/Cargo.toml | 2 +- polkadot/node/collation-generation/Cargo.toml | 2 +- polkadot/node/core/approval-voting/Cargo.toml | 6 +- polkadot/node/core/av-store/Cargo.toml | 4 +- polkadot/node/core/backing/Cargo.toml | 4 +- .../node/core/bitfield-signing/Cargo.toml | 2 +- .../node/core/candidate-validation/Cargo.toml | 6 +- polkadot/node/core/chain-api/Cargo.toml | 4 +- polkadot/node/core/chain-selection/Cargo.toml | 2 +- .../node/core/dispute-coordinator/Cargo.toml | 2 +- .../node/core/parachains-inherent/Cargo.toml | 4 +- .../core/prospective-parachains/Cargo.toml | 2 +- polkadot/node/core/provisioner/Cargo.toml | 2 +- polkadot/node/core/pvf-checker/Cargo.toml | 2 +- polkadot/node/core/pvf/Cargo.toml | 2 +- polkadot/node/core/pvf/common/Cargo.toml | 2 +- polkadot/node/core/runtime-api/Cargo.toml | 6 +- polkadot/node/jaeger/Cargo.toml | 2 +- polkadot/node/malus/Cargo.toml | 6 +- polkadot/node/metrics/Cargo.toml | 4 +- .../network/approval-distribution/Cargo.toml | 4 +- .../availability-distribution/Cargo.toml | 2 +- .../network/availability-recovery/Cargo.toml | 8 +- .../network/bitfield-distribution/Cargo.toml | 4 +- 
 polkadot/node/network/bridge/Cargo.toml | 4 +-
 .../node/network/collator-protocol/Cargo.toml | 4 +-
 .../network/dispute-distribution/Cargo.toml | 4 +-
 .../node/network/gossip-support/Cargo.toml | 4 +-
 polkadot/node/network/protocol/Cargo.toml | 6 +-
 .../network/statement-distribution/Cargo.toml | 2 +-
 polkadot/node/overseer/Cargo.toml | 6 +-
 polkadot/node/primitives/Cargo.toml | 2 +-
 polkadot/node/service/Cargo.toml | 6 +-
 polkadot/node/subsystem-bench/Cargo.toml | 6 +-
 .../node/subsystem-test-helpers/Cargo.toml | 4 +-
 polkadot/node/subsystem-types/Cargo.toml | 4 +-
 polkadot/node/subsystem-util/Cargo.toml | 8 +-
 polkadot/node/test/client/Cargo.toml | 2 +-
 polkadot/node/test/service/Cargo.toml | 6 +-
 polkadot/parachain/Cargo.toml | 2 +-
 .../test-parachains/adder/collator/Cargo.toml | 2 +-
 .../undying/collator/Cargo.toml | 2 +-
 polkadot/primitives/Cargo.toml | 2 +-
 polkadot/runtime/common/Cargo.toml | 2 +-
 polkadot/runtime/parachains/Cargo.toml | 4 +-
 polkadot/runtime/rococo/Cargo.toml | 2 +-
 polkadot/runtime/test-runtime/Cargo.toml | 2 +-
 polkadot/runtime/westend/Cargo.toml | 2 +-
 polkadot/xcm/Cargo.toml | 2 +-
 polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml | 2 +-
 polkadot/xcm/pallet-xcm/Cargo.toml | 2 +-
 polkadot/xcm/xcm-builder/Cargo.toml | 2 +-
 polkadot/xcm/xcm-executor/Cargo.toml | 2 +-
 .../xcm-executor/integration-tests/Cargo.toml | 2 +-
 .../xcm-fee-payment-runtime-api/Cargo.toml | 2 +-
 polkadot/xcm/xcm-simulator/example/Cargo.toml | 2 +-
 polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml | 2 +-
 substrate/bin/node/bench/Cargo.toml | 2 +-
 substrate/bin/node/cli/Cargo.toml | 6 +-
 substrate/bin/node/runtime/Cargo.toml | 2 +-
 substrate/bin/node/testing/Cargo.toml | 2 +-
 substrate/client/api/Cargo.toml | 2 +-
 .../client/authority-discovery/Cargo.toml | 4 +-
 substrate/client/basic-authorship/Cargo.toml | 2 +-
 substrate/client/cli/Cargo.toml | 2 +-
 substrate/client/consensus/aura/Cargo.toml | 4 +-
 substrate/client/consensus/babe/Cargo.toml | 6 +-
 .../client/consensus/babe/rpc/Cargo.toml | 4 +-
 substrate/client/consensus/beefy/Cargo.toml | 6 +-
 .../client/consensus/beefy/rpc/Cargo.toml | 2 +-
 substrate/client/consensus/common/Cargo.toml | 4 +-
 substrate/client/consensus/grandpa/Cargo.toml | 6 +-
 .../client/consensus/grandpa/rpc/Cargo.toml | 2 +-
 .../client/consensus/manual-seal/Cargo.toml | 4 +-
 substrate/client/consensus/pow/Cargo.toml | 4 +-
 substrate/client/consensus/slots/Cargo.toml | 4 +-
 substrate/client/executor/Cargo.toml | 2 +-
 substrate/client/informant/Cargo.toml | 2 +-
 .../client/merkle-mountain-range/Cargo.toml | 4 +-
 substrate/client/mixnet/Cargo.toml | 2 +-
 substrate/client/network-gossip/Cargo.toml | 6 +-
 substrate/client/network/Cargo.toml | 4 +-
 substrate/client/network/bitswap/Cargo.toml | 2 +-
 substrate/client/network/common/Cargo.toml | 4 +-
 substrate/client/network/light/Cargo.toml | 2 +-
 substrate/client/network/statement/Cargo.toml | 2 +-
 substrate/client/network/sync/Cargo.toml | 4 +-
 substrate/client/network/test/Cargo.toml | 6 +-
 .../client/network/transactions/Cargo.toml | 2 +-
 substrate/client/offchain/Cargo.toml | 4 +-
 substrate/client/rpc-api/Cargo.toml | 2 +-
 substrate/client/rpc-servers/Cargo.toml | 2 +-
 substrate/client/rpc-spec-v2/Cargo.toml | 2 +-
 substrate/client/rpc/Cargo.toml | 8 +-
 substrate/client/service/Cargo.toml | 4 +-
 substrate/client/service/test/Cargo.toml | 2 +-
 substrate/client/statement-store/Cargo.toml | 2 +-
 substrate/client/sysinfo/Cargo.toml | 2 +-
 substrate/client/telemetry/Cargo.toml | 2 +-
 substrate/client/transaction-pool/Cargo.toml | 4 +-
 .../client/transaction-pool/api/Cargo.toml | 4 +-
 substrate/client/utils/Cargo.toml | 2 +-
 substrate/frame/Cargo.toml | 2 +-
 substrate/frame/alliance/Cargo.toml | 2 +-
 substrate/frame/asset-conversion/Cargo.toml | 2 +-
 substrate/frame/asset-rate/Cargo.toml | 2 +-
 substrate/frame/assets/Cargo.toml | 2 +-
 substrate/frame/atomic-swap/Cargo.toml | 2 +-
 substrate/frame/aura/Cargo.toml | 2 +-
 .../frame/authority-discovery/Cargo.toml | 2 +-
 substrate/frame/authorship/Cargo.toml | 2 +-
 substrate/frame/babe/Cargo.toml | 2 +-
 substrate/frame/bags-list/Cargo.toml | 2 +-
 substrate/frame/balances/Cargo.toml | 2 +-
 substrate/frame/beefy-mmr/Cargo.toml | 2 +-
 substrate/frame/beefy/Cargo.toml | 2 +-
 substrate/frame/benchmarking/Cargo.toml | 2 +-
 substrate/frame/benchmarking/pov/Cargo.toml | 2 +-
 substrate/frame/bounties/Cargo.toml | 2 +-
 substrate/frame/broker/Cargo.toml | 2 +-
 substrate/frame/child-bounties/Cargo.toml | 2 +-
 substrate/frame/collective/Cargo.toml | 2 +-
 substrate/frame/contracts/Cargo.toml | 4 +-
 .../frame/contracts/mock-network/Cargo.toml | 2 +-
 substrate/frame/contracts/uapi/Cargo.toml | 2 +-
 substrate/frame/conviction-voting/Cargo.toml | 2 +-
 substrate/frame/core-fellowship/Cargo.toml | 2 +-
 substrate/frame/democracy/Cargo.toml | 2 +-
 .../election-provider-multi-phase/Cargo.toml | 4 +-
 .../test-staking-e2e/Cargo.toml | 2 +-
 .../election-provider-support/Cargo.toml | 2 +-
 .../solution-type/Cargo.toml | 2 +-
 .../solution-type/fuzzer/Cargo.toml | 2 +-
 substrate/frame/elections-phragmen/Cargo.toml | 2 +-
 substrate/frame/examples/basic/Cargo.toml | 2 +-
 .../frame/examples/default-config/Cargo.toml | 2 +-
 substrate/frame/examples/dev-mode/Cargo.toml | 2 +-
 .../frame/examples/frame-crate/Cargo.toml | 2 +-
 .../frame/examples/kitchensink/Cargo.toml | 2 +-
 .../frame/examples/offchain-worker/Cargo.toml | 2 +-
 .../single-block-migrations/Cargo.toml | 2 +-
 substrate/frame/examples/split/Cargo.toml | 2 +-
 substrate/frame/examples/tasks/Cargo.toml | 2 +-
 substrate/frame/executive/Cargo.toml | 2 +-
 substrate/frame/fast-unstake/Cargo.toml | 2 +-
 substrate/frame/glutton/Cargo.toml | 2 +-
 substrate/frame/grandpa/Cargo.toml | 2 +-
 substrate/frame/identity/Cargo.toml | 2 +-
 substrate/frame/im-online/Cargo.toml | 2 +-
 substrate/frame/indices/Cargo.toml | 2 +-
 .../Cargo.toml | 2 +-
 substrate/frame/lottery/Cargo.toml | 2 +-
 substrate/frame/membership/Cargo.toml | 2 +-
 .../frame/merkle-mountain-range/Cargo.toml | 4 +-
 substrate/frame/message-queue/Cargo.toml | 2 +-
 substrate/frame/migrations/Cargo.toml | 2 +-
 substrate/frame/mixnet/Cargo.toml | 2 +-
 substrate/frame/multisig/Cargo.toml | 2 +-
 .../frame/nft-fractionalization/Cargo.toml | 2 +-
 substrate/frame/nfts/Cargo.toml | 2 +-
 substrate/frame/nis/Cargo.toml | 2 +-
 substrate/frame/node-authorization/Cargo.toml | 2 +-
 substrate/frame/nomination-pools/Cargo.toml | 2 +-
 .../nomination-pools/benchmarking/Cargo.toml | 2 +-
 .../nomination-pools/test-staking/Cargo.toml | 2 +-
 substrate/frame/offences/Cargo.toml | 2 +-
 .../frame/offences/benchmarking/Cargo.toml | 2 +-
 substrate/frame/paged-list/Cargo.toml | 2 +-
 substrate/frame/parameters/Cargo.toml | 2 +-
 substrate/frame/preimage/Cargo.toml | 2 +-
 substrate/frame/proxy/Cargo.toml | 2 +-
 substrate/frame/ranked-collective/Cargo.toml | 2 +-
 substrate/frame/recovery/Cargo.toml | 2 +-
 substrate/frame/referenda/Cargo.toml | 2 +-
 substrate/frame/remark/Cargo.toml | 2 +-
 substrate/frame/root-offences/Cargo.toml | 2 +-
 substrate/frame/root-testing/Cargo.toml | 2 +-
 substrate/frame/safe-mode/Cargo.toml | 2 +-
 substrate/frame/salary/Cargo.toml | 2 +-
 substrate/frame/sassafras/Cargo.toml | 2 +-
 substrate/frame/scheduler/Cargo.toml | 2 +-
 substrate/frame/scored-pool/Cargo.toml | 2 +-
 substrate/frame/session/Cargo.toml | 2 +-
 .../frame/session/benchmarking/Cargo.toml | 2 +-
 substrate/frame/society/Cargo.toml | 2 +-
 substrate/frame/staking/Cargo.toml | 2 +-
 .../frame/state-trie-migration/Cargo.toml | 2 +-
 substrate/frame/statement/Cargo.toml | 2 +-
 substrate/frame/sudo/Cargo.toml | 2 +-
 substrate/frame/support/Cargo.toml | 2 +-
 substrate/frame/support/test/Cargo.toml | 2 +-
 .../support/test/compile_pass/Cargo.toml | 2 +-
 .../frame/support/test/pallet/Cargo.toml | 2 +-
 .../support/test/stg_frame_crate/Cargo.toml | 2 +-
 substrate/frame/system/Cargo.toml | 2 +-
 .../frame/system/benchmarking/Cargo.toml | 2 +-
 substrate/frame/timestamp/Cargo.toml | 2 +-
 substrate/frame/tips/Cargo.toml | 2 +-
 .../frame/transaction-payment/Cargo.toml | 2 +-
 .../asset-conversion-tx-payment/Cargo.toml | 2 +-
 .../asset-tx-payment/Cargo.toml | 2 +-
 .../frame/transaction-storage/Cargo.toml | 2 +-
 substrate/frame/treasury/Cargo.toml | 2 +-
 substrate/frame/tx-pause/Cargo.toml | 2 +-
 substrate/frame/uniques/Cargo.toml | 2 +-
 substrate/frame/utility/Cargo.toml | 2 +-
 substrate/frame/vesting/Cargo.toml | 2 +-
 substrate/frame/whitelist/Cargo.toml | 2 +-
 substrate/primitives/api/Cargo.toml | 2 +-
 substrate/primitives/api/test/Cargo.toml | 4 +-
 .../primitives/application-crypto/Cargo.toml | 2 +-
 substrate/primitives/arithmetic/Cargo.toml | 2 +-
 .../primitives/authority-discovery/Cargo.toml | 2 +-
 substrate/primitives/blockchain/Cargo.toml | 2 +-
 .../primitives/consensus/aura/Cargo.toml | 4 +-
 .../primitives/consensus/babe/Cargo.toml | 4 +-
 .../primitives/consensus/beefy/Cargo.toml | 4 +-
 .../primitives/consensus/common/Cargo.toml | 6 +-
 .../primitives/consensus/grandpa/Cargo.toml | 2 +-
 .../primitives/consensus/sassafras/Cargo.toml | 2 +-
 .../primitives/consensus/slots/Cargo.toml | 2 +-
 substrate/primitives/core/Cargo.toml | 2 +-
 substrate/primitives/inherents/Cargo.toml | 6 +-
 substrate/primitives/keyring/Cargo.toml | 2 +-
 .../merkle-mountain-range/Cargo.toml | 2 +-
 substrate/primitives/metadata-ir/Cargo.toml | 2 +-
 substrate/primitives/mixnet/Cargo.toml | 2 +-
 .../primitives/npos-elections/Cargo.toml | 2 +-
 substrate/primitives/runtime/Cargo.toml | 2 +-
 substrate/primitives/session/Cargo.toml | 2 +-
 substrate/primitives/staking/Cargo.toml | 2 +-
 .../primitives/statement-store/Cargo.toml | 2 +-
 .../primitives/test-primitives/Cargo.toml | 2 +-
 substrate/primitives/timestamp/Cargo.toml | 2 +-
 .../transaction-storage-proof/Cargo.toml | 4 +-
 substrate/primitives/trie/Cargo.toml | 2 +-
 substrate/primitives/version/Cargo.toml | 2 +-
 substrate/primitives/weights/Cargo.toml | 2 +-
 substrate/test-utils/Cargo.toml | 2 +-
 substrate/test-utils/client/Cargo.toml | 4 +-
 substrate/test-utils/runtime/Cargo.toml | 4 +-
 .../test-utils/runtime/client/Cargo.toml | 2 +-
 .../runtime/transaction-pool/Cargo.toml | 2 +-
 substrate/utils/binary-merkle-tree/Cargo.toml | 2 +-
 .../frame/remote-externalities/Cargo.toml | 2 +-
 substrate/utils/frame/rpc/client/Cargo.toml | 2 +-
 substrate/utils/frame/rpc/support/Cargo.toml | 4 +-
 substrate/utils/frame/rpc/system/Cargo.toml | 4 +-
 .../utils/frame/try-runtime/cli/Cargo.toml | 4 +-
 substrate/utils/wasm-builder/Cargo.toml | 2 +-
 templates/minimal/node/Cargo.toml | 2 +-
 templates/minimal/pallets/template/Cargo.toml | 2 +-
 .../parachain/pallets/template/Cargo.toml | 2 +-
 templates/parachain/runtime/Cargo.toml | 2 +-
 templates/solochain/node/Cargo.toml | 2 +-
 .../solochain/pallets/template/Cargo.toml | 2 +-
 templates/solochain/runtime/Cargo.toml | 2 +-
 305 files changed, 466 insertions(+), 434 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock index 24612391d3f..9393f8d606d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -275,9 +275,9 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.2" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15c4c2c83f81532e5845a733998b6971faca23490340a418e9b72a3ec9de12ea" +checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" [[package]] name = "anstyle-parse" @@ -1239,9 +1239,9 @@ checksum = "ecc7ab41815b3c653ccd2978ec3255c81349336702dfdf62ee6f7069b12a3aae" [[package]] name = "async-trait" -version = "0.1.74" +version = "0.1.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" +checksum = "a507401cad91ec6a857ed5513a2073c82a9b9048762b885bb98655b306964681" dependencies = [ "proc-macro2", "quote", @@ -1393,7 +1393,7 @@ name = "binary-merkle-tree" version = "13.0.0" dependencies = [ "array-bytes 6.1.0", - "env_logger 0.9.3", + "env_logger 0.11.3", "hash-db", "log", "sp-core", @@ -4991,10 +4991,10 @@ dependencies = [ ] [[package]] -name = "env_logger" -version = "0.8.4" +name = "env_filter" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3" +checksum = "a009aa4810eb158359dda09d0c87378e4bbb89b5a801f016885a4707ba24f7ea" dependencies = [ "log", "regex", @@ -5002,15 +5002,12 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.9.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" +checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3" dependencies = [ - "atty", - "humantime", "log", "regex", - "termcolor", ] [[package]] @@ -5026,6 +5023,19 @@ dependencies = [ "termcolor", ] +[[package]] +name = "env_logger" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38b35839ba51819680ba087cd351788c9a3c476841207e0b8cee0b04722343b9" +dependencies = [ + "anstream", + "anstyle", + "env_filter", + "humantime", + "log", +] + [[package]] name = "environmental" version = "1.1.4" @@ -6758,7 +6768,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.3", + "socket2 0.5.6", "widestring", "windows-sys 0.48.0", "winreg", @@ -9520,7 +9530,7 @@ dependencies = [ "array-bytes 6.1.0", "assert_matches", "bitflags 1.3.2", - "env_logger 0.9.3", + "env_logger 0.11.3", "environmental", "frame-benchmarking", "frame-support", @@ -9767,7 +9777,7 @@ dependencies = [ "sp-runtime", "sp-std 14.0.0", "sp-tracing 16.0.0", - "strum 0.24.1", + "strum 0.26.2", ] [[package]] @@ -10188,7 +10198,7 @@ name = "pallet-mmr" version = "27.0.0" dependencies = [ "array-bytes 6.1.0", - "env_logger 0.9.3", + "env_logger 0.11.3", "frame-benchmarking", "frame-support", "frame-system", @@ -12080,7 +12090,7 @@ version = "7.0.0" dependencies = [ "assert_matches", "bitvec", - "env_logger 0.9.3", + "env_logger 0.11.3", "futures", "futures-timer", "itertools 0.10.5", @@ -12110,7 +12120,7 @@ dependencies = [ "always-assert", 
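The biggest change in the Cargo.lock slice above is the env_logger migration: the workspace's dev-dependencies move from the unmaintained 0.8/0.9 line to 0.11.3, which splits filtering into the new env_filter crate and swaps atty/termcolor for anstream/anstyle. The builder API is unchanged across that jump, so test-side call sites keep compiling; a minimal sketch, where the init_test_logger helper is illustrative and not part of this patch:

    // env_logger 0.11 keeps the 0.9-era builder surface used by these
    // dev-dependencies; only the crate's internals changed.
    use env_logger::Env;

    fn init_test_logger() {
        // `is_test(true)` cooperates with libtest's output capture;
        // `try_init` tolerates repeated calls from multiple #[test] fns.
        let _ = env_logger::Builder::from_env(Env::default().default_filter_or("debug"))
            .is_test(true)
            .try_init();
    }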
"assert_matches", "bitvec", - "env_logger 0.9.3", + "env_logger 0.11.3", "futures", "futures-timer", "log", @@ -12166,7 +12176,7 @@ version = "7.0.0" dependencies = [ "assert_matches", "async-trait", - "env_logger 0.9.3", + "env_logger 0.11.3", "fatality", "futures", "futures-timer", @@ -12228,7 +12238,7 @@ version = "7.0.0" dependencies = [ "assert_matches", "bitvec", - "env_logger 0.9.3", + "env_logger 0.11.3", "fatality", "futures", "futures-timer", @@ -12400,7 +12410,7 @@ dependencies = [ "async-trait", "bitvec", "derive_more", - "env_logger 0.9.3", + "env_logger 0.11.3", "futures", "futures-timer", "itertools 0.10.5", @@ -12442,7 +12452,7 @@ version = "7.0.0" dependencies = [ "assert_matches", "bitvec", - "env_logger 0.9.3", + "env_logger 0.11.3", "futures", "futures-timer", "kvdb", @@ -12884,7 +12894,7 @@ dependencies = [ "rand_chacha 0.3.1", "sc-authority-discovery", "sc-network", - "strum 0.24.1", + "strum 0.26.2", "thiserror", "tracing-gum", ] @@ -12976,7 +12986,7 @@ dependencies = [ "assert_matches", "async-trait", "derive_more", - "env_logger 0.9.3", + "env_logger 0.11.3", "fatality", "futures", "futures-channel", @@ -13420,7 +13430,7 @@ dependencies = [ "assert_matches", "async-trait", "bitvec", - "env_logger 0.9.3", + "env_logger 0.11.3", "frame-benchmarking", "frame-benchmarking-cli", "frame-support", @@ -13596,7 +13606,7 @@ dependencies = [ "clap-num", "color-eyre", "colored", - "env_logger 0.9.3", + "env_logger 0.11.3", "futures", "futures-timer", "hex", @@ -16130,7 +16140,7 @@ dependencies = [ "array-bytes 6.1.0", "assert_matches", "criterion 0.4.0", - "env_logger 0.9.3", + "env_logger 0.11.3", "num_cpus", "parity-scale-codec", "parking_lot 0.12.1", @@ -16566,7 +16576,7 @@ name = "sc-rpc" version = "29.0.0" dependencies = [ "assert_matches", - "env_logger 0.9.3", + "env_logger 0.11.3", "futures", "jsonrpsee", "log", @@ -16808,7 +16818,7 @@ dependencies = [ name = "sc-statement-store" version = "10.0.0" dependencies = [ - "env_logger 0.9.3", + "env_logger 0.11.3", "log", "parity-db", "parking_lot 0.12.1", @@ -17891,7 +17901,7 @@ name = "snowbridge-outbound-queue-merkle-tree" version = "0.3.0" dependencies = [ "array-bytes 4.2.0", - "env_logger 0.9.3", + "env_logger 0.11.3", "hex", "hex-literal", "parity-scale-codec", @@ -18201,12 +18211,12 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.3" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2538b18701741680e0322a2302176d3253a35388e2e62f172f64f4f16605f877" +checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" dependencies = [ "libc", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -18537,7 +18547,7 @@ dependencies = [ "sp-keystore", "sp-mmr-primitives", "sp-runtime", - "strum 0.24.1", + "strum 0.26.2", "w3f-bls", ] @@ -18828,7 +18838,7 @@ version = "31.0.0" dependencies = [ "sp-core", "sp-runtime", - "strum 0.24.1", + "strum 0.26.2", ] [[package]] @@ -19714,6 +19724,15 @@ version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" +[[package]] +name = "strum" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29" +dependencies = [ + "strum_macros 0.26.2", +] + [[package]] name = "strum_macros" version = "0.24.3" @@ -19740,6 +19759,19 @@ dependencies = [ "syn 2.0.53", ] +[[package]] +name = "strum_macros" +version = 
"0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6cf59daf282c0a494ba14fd21610a0325f9f90ec9d1231dea26bcb1d696c946" +dependencies = [ + "heck 0.4.1", + "proc-macro2", + "quote", + "rustversion", + "syn 2.0.53", +] + [[package]] name = "subkey" version = "9.0.0" @@ -20005,7 +20037,7 @@ dependencies = [ "parity-wasm", "polkavm-linker", "sp-maybe-compressed-blob", - "strum 0.24.1", + "strum 0.26.2", "tempfile", "toml 0.8.8", "walkdir", @@ -20553,9 +20585,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.33.0" +version = "1.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f38200e3ef7995e5ef13baec2f432a6da0aa9ac495b2c0e8f3b7eec2c92d653" +checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" dependencies = [ "backtrace", "bytes", @@ -20565,16 +20597,16 @@ dependencies = [ "parking_lot 0.12.1", "pin-project-lite 0.2.12", "signal-hook-registry", - "socket2 0.5.3", + "socket2 0.5.6", "tokio-macros", "windows-sys 0.48.0", ] [[package]] name = "tokio-macros" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" +checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", diff --git a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml index 0606e9de330..5315d6b4adb 100644 --- a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml @@ -23,7 +23,7 @@ sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-fea [dev-dependencies] hex-literal = { version = "0.4.1" } -env_logger = "0.9" +env_logger = "0.11" hex = "0.4" array-bytes = "4.1" sp-crypto-hashing = { path = "../../../../../substrate/primitives/crypto/hashing" } diff --git a/bridges/snowbridge/runtime/test-common/Cargo.toml b/bridges/snowbridge/runtime/test-common/Cargo.toml index 4e8b311cb97..90b4f38e721 100644 --- a/bridges/snowbridge/runtime/test-common/Cargo.toml +++ b/bridges/snowbridge/runtime/test-common/Cargo.toml @@ -14,7 +14,7 @@ workspace = true codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } hex-literal = { version = "0.4.1" } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } smallvec = "1.11.0" diff --git a/cumulus/client/collator/Cargo.toml b/cumulus/client/collator/Cargo.toml index 0e911b9f3ab..42f7342d1a5 100644 --- a/cumulus/client/collator/Cargo.toml +++ b/cumulus/client/collator/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] parking_lot = "0.12.1" codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } -futures = "0.3.21" +futures = "0.3.30" tracing = "0.1.25" # Substrate @@ -34,7 +34,7 @@ cumulus-client-network = { path = "../network" } cumulus-primitives-core = { path = "../../primitives/core" } [dev-dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" # Substrate sp-maybe-compressed-blob = { path = 
"../../../substrate/primitives/maybe-compressed-blob" } diff --git a/cumulus/client/consensus/aura/Cargo.toml b/cumulus/client/consensus/aura/Cargo.toml index 58bb1dd5914..70dd67cb9a0 100644 --- a/cumulus/client/consensus/aura/Cargo.toml +++ b/cumulus/client/consensus/aura/Cargo.toml @@ -10,7 +10,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } futures = "0.3.28" tracing = "0.1.37" diff --git a/cumulus/client/consensus/common/Cargo.toml b/cumulus/client/consensus/common/Cargo.toml index 5a014b10e35..fb4a85ad122 100644 --- a/cumulus/client/consensus/common/Cargo.toml +++ b/cumulus/client/consensus/common/Cargo.toml @@ -10,7 +10,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } dyn-clone = "1.0.16" futures = "0.3.28" diff --git a/cumulus/client/consensus/proposer/Cargo.toml b/cumulus/client/consensus/proposer/Cargo.toml index b37232bb448..42ca4e06f8f 100644 --- a/cumulus/client/consensus/proposer/Cargo.toml +++ b/cumulus/client/consensus/proposer/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] anyhow = "1.0" -async-trait = "0.1.74" +async-trait = "0.1.79" thiserror = { workspace = true } # Substrate diff --git a/cumulus/client/consensus/relay-chain/Cargo.toml b/cumulus/client/consensus/relay-chain/Cargo.toml index 3d06d6b89ef..cb32b980457 100644 --- a/cumulus/client/consensus/relay-chain/Cargo.toml +++ b/cumulus/client/consensus/relay-chain/Cargo.toml @@ -10,7 +10,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" futures = "0.3.28" parking_lot = "0.12.1" tracing = "0.1.37" diff --git a/cumulus/client/network/Cargo.toml b/cumulus/client/network/Cargo.toml index 995ef606d27..1210975ef69 100644 --- a/cumulus/client/network/Cargo.toml +++ b/cumulus/client/network/Cargo.toml @@ -10,7 +10,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } futures = "0.3.28" futures-timer = "3.0.2" diff --git a/cumulus/client/parachain-inherent/Cargo.toml b/cumulus/client/parachain-inherent/Cargo.toml index e00f3ba2606..6e9adab1ffc 100644 --- a/cumulus/client/parachain-inherent/Cargo.toml +++ b/cumulus/client/parachain-inherent/Cargo.toml @@ -7,9 +7,9 @@ description = "Inherent that needs to be present in every parachain block. 
Conta license = "Apache-2.0" [dependencies] -async-trait = "0.1.73" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } -scale-info = { version = "2.10.0", features = ["derive"] } +scale-info = { version = "2.11.1", features = ["derive"] } tracing = { version = "0.1.37" } # Substrate diff --git a/cumulus/client/pov-recovery/Cargo.toml b/cumulus/client/pov-recovery/Cargo.toml index 375a57a87c2..571935620d6 100644 --- a/cumulus/client/pov-recovery/Cargo.toml +++ b/cumulus/client/pov-recovery/Cargo.toml @@ -32,7 +32,7 @@ polkadot-primitives = { path = "../../../polkadot/primitives" } # Cumulus cumulus-primitives-core = { path = "../../primitives/core" } cumulus-relay-chain-interface = { path = "../relay-chain-interface" } -async-trait = "0.1.74" +async-trait = "0.1.79" [dev-dependencies] tokio = { version = "1.32.0", features = ["macros"] } diff --git a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml index aa16230cd8a..7629b6c631a 100644 --- a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml +++ b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml @@ -10,7 +10,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" futures = "0.3.28" futures-timer = "3.0.2" diff --git a/cumulus/client/relay-chain-interface/Cargo.toml b/cumulus/client/relay-chain-interface/Cargo.toml index 6e652b89210..6df9847252f 100644 --- a/cumulus/client/relay-chain-interface/Cargo.toml +++ b/cumulus/client/relay-chain-interface/Cargo.toml @@ -20,7 +20,7 @@ sp-state-machine = { path = "../../../substrate/primitives/state-machine" } sc-client-api = { path = "../../../substrate/client/api" } futures = "0.3.28" -async-trait = "0.1.74" +async-trait = "0.1.79" thiserror = { workspace = true } jsonrpsee-core = "0.22" parity-scale-codec = "3.6.4" diff --git a/cumulus/client/relay-chain-minimal-node/Cargo.toml b/cumulus/client/relay-chain-minimal-node/Cargo.toml index 98240c92ada..6860b42a507 100644 --- a/cumulus/client/relay-chain-minimal-node/Cargo.toml +++ b/cumulus/client/relay-chain-minimal-node/Cargo.toml @@ -49,6 +49,6 @@ cumulus-primitives-core = { path = "../../primitives/core" } array-bytes = "6.1" tracing = "0.1.37" -async-trait = "0.1.74" +async-trait = "0.1.79" futures = "0.3.28" parking_lot = "0.12.1" diff --git a/cumulus/client/relay-chain-rpc-interface/Cargo.toml b/cumulus/client/relay-chain-rpc-interface/Cargo.toml index 801712b1ad1..14981677289 100644 --- a/cumulus/client/relay-chain-rpc-interface/Cargo.toml +++ b/cumulus/client/relay-chain-rpc-interface/Cargo.toml @@ -35,7 +35,7 @@ futures-timer = "3.0.2" parity-scale-codec = "3.6.4" jsonrpsee = { version = "0.22", features = ["ws-client"] } tracing = "0.1.37" -async-trait = "0.1.74" +async-trait = "0.1.79" url = "2.4.0" serde_json = { workspace = true, default-features = true } serde = { workspace = true, default-features = true } diff --git a/cumulus/pallets/aura-ext/Cargo.toml b/cumulus/pallets/aura-ext/Cargo.toml index ff30dce7b03..fe717596f9b 100644 --- a/cumulus/pallets/aura-ext/Cargo.toml +++ b/cumulus/pallets/aura-ext/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = 
false, features = ["derive"] } # Substrate frame-support = { path = "../../../substrate/frame/support", default-features = false } diff --git a/cumulus/pallets/collator-selection/Cargo.toml b/cumulus/pallets/collator-selection/Cargo.toml index 241a78466d6..c04d9e1403e 100644 --- a/cumulus/pallets/collator-selection/Cargo.toml +++ b/cumulus/pallets/collator-selection/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] log = { workspace = true } codec = { default-features = false, features = ["derive"], package = "parity-scale-codec", version = "3.0.0" } rand = { version = "0.8.5", features = ["std_rng"], default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-std = { path = "../../../substrate/primitives/std", default-features = false } sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } diff --git a/cumulus/pallets/dmp-queue/Cargo.toml b/cumulus/pallets/dmp-queue/Cargo.toml index 83ed994d041..b2b24aeed72 100644 --- a/cumulus/pallets/dmp-queue/Cargo.toml +++ b/cumulus/pallets/dmp-queue/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-support = { path = "../../../substrate/frame/support", default-features = false } diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 7e0442f0b58..a905df5b94a 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -16,7 +16,7 @@ environmental = { version = "1.1.4", default-features = false } impl-trait-for-tuples = "0.2.1" log = { workspace = true } trie-db = { version = "0.28.0", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } diff --git a/cumulus/pallets/solo-to-para/Cargo.toml b/cumulus/pallets/solo-to-para/Cargo.toml index f7dc5fe4de3..417038d7833 100644 --- a/cumulus/pallets/solo-to-para/Cargo.toml +++ b/cumulus/pallets/solo-to-para/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-support = { path = "../../../substrate/frame/support", default-features = false } diff --git a/cumulus/pallets/xcm/Cargo.toml b/cumulus/pallets/xcm/Cargo.toml index 63cb14b16e7..9122e110fb9 100644 --- a/cumulus/pallets/xcm/Cargo.toml +++ b/cumulus/pallets/xcm/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } 
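Every scale-info edit in these manifests is the same semver-compatible 2.10.0 -> 2.11.1 bump with the feature set left untouched, so existing TypeInfo derives recompile without source changes. A minimal sketch of the derive surface these dependency lines feed, assuming the "derive" feature on both crates as the manifests enable; ClaimInfo is a hypothetical example type, not something this patch introduces:

    use parity_scale_codec::{Decode, Encode};
    use scale_info::TypeInfo;

    // TypeInfo is what ends up in runtime metadata; Encode/Decode give the
    // SCALE wire format. scale-info 2.11 changes neither derive.
    #[derive(Encode, Decode, TypeInfo)]
    pub struct ClaimInfo {
        pub para_id: u32,
        pub core_index: u32,
    }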
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-std = { path = "../../../substrate/primitives/std", default-features = false } sp-io = { path = "../../../substrate/primitives/io", default-features = false } diff --git a/cumulus/pallets/xcmp-queue/Cargo.toml b/cumulus/pallets/xcmp-queue/Cargo.toml index 9078d5eda99..ab196c6d3ec 100644 --- a/cumulus/pallets/xcmp-queue/Cargo.toml +++ b/cumulus/pallets/xcmp-queue/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"], default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-support = { path = "../../../substrate/frame/support", default-features = false } diff --git a/cumulus/parachains/common/Cargo.toml b/cumulus/parachains/common/Cargo.toml index ebc9f822beb..fa16205d0fd 100644 --- a/cumulus/parachains/common/Cargo.toml +++ b/cumulus/parachains/common/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"], default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-support = { path = "../../../substrate/frame/support", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml index 18c39f895fa..010c252658c 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } hex-literal = "0.4.1" # Substrate diff --git a/cumulus/parachains/pallets/collective-content/Cargo.toml b/cumulus/parachains/pallets/collective-content/Cargo.toml index d4290dd2de2..b3fac47cb4a 100644 --- a/cumulus/parachains/pallets/collective-content/Cargo.toml +++ b/cumulus/parachains/pallets/collective-content/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-support = { path = "../../../../substrate/frame/support", default-features = false } diff --git a/cumulus/parachains/pallets/parachain-info/Cargo.toml b/cumulus/parachains/pallets/parachain-info/Cargo.toml index 0e2f965e1cf..17981d238fd 100644 --- a/cumulus/parachains/pallets/parachain-info/Cargo.toml +++ 
b/cumulus/parachains/pallets/parachain-info/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../../substrate/frame/system", default-features = false } diff --git a/cumulus/parachains/pallets/ping/Cargo.toml b/cumulus/parachains/pallets/ping/Cargo.toml index 1afd55eb0b9..15169b08b91 100644 --- a/cumulus/parachains/pallets/ping/Cargo.toml +++ b/cumulus/parachains/pallets/ping/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-std = { path = "../../../../substrate/primitives/std", default-features = false } sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml index 53abb620022..f5ea0937dec 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml @@ -13,7 +13,7 @@ workspace = true codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } hex-literal = { version = "0.4.1" } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml index 0f8a1182cd7..b792d64c03e 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml @@ -13,7 +13,7 @@ workspace = true codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } hex-literal = { version = "0.4.1", optional = true } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } diff --git a/cumulus/parachains/runtimes/assets/common/Cargo.toml b/cumulus/parachains/runtimes/assets/common/Cargo.toml index c9252375cfb..12dfd9da1ff 100644 --- a/cumulus/parachains/runtimes/assets/common/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/common/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = 
["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } log = { workspace = true } impl-trait-for-tuples = "0.2.2" diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index 13b4b624eef..1dd4f499b4d 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -18,7 +18,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = ] } hex-literal = { version = "0.4.1" } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = [ +scale-info = { version = "2.11.1", default-features = false, features = [ "derive", ] } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml index 0c46e6c2e14..1501ed12e3a 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml @@ -16,7 +16,7 @@ substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } hex-literal = { version = "0.4.1" } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } # Substrate diff --git a/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml index a4dcd19dc9e..2ab6ee7995f 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml @@ -8,7 +8,7 @@ license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../../../../../substrate/frame/support", default-features = false } sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml index 4224b397139..9c3acf6ad93 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml @@ -13,7 +13,7 @@ workspace = true codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } hex-literal = { version = "0.4.1" } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-benchmarking = { path = 
"../../../../../substrate/frame/benchmarking", default-features = false, optional = true } diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml index e4ac2016a72..a0aeb642df0 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml @@ -19,7 +19,7 @@ substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } hex-literal = { version = "0.4.1", optional = true } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml index a357bf519e4..fe9cd25841b 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } diff --git a/cumulus/parachains/runtimes/starters/seedling/Cargo.toml b/cumulus/parachains/runtimes/starters/seedling/Cargo.toml index 9f08fdf5943..eb702c9f2cd 100644 --- a/cumulus/parachains/runtimes/starters/seedling/Cargo.toml +++ b/cumulus/parachains/runtimes/starters/seedling/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } diff --git a/cumulus/parachains/runtimes/starters/shell/Cargo.toml b/cumulus/parachains/runtimes/starters/shell/Cargo.toml index 2f82547afe9..f66d04fec1f 100644 --- a/cumulus/parachains/runtimes/starters/shell/Cargo.toml +++ b/cumulus/parachains/runtimes/starters/shell/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } diff --git a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml index c18f6571f41..028aa002a91 100644 --- a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml @@ -21,7 +21,7 @@ substrate-wasm-builder = { 
path = "../../../../../substrate/utils/wasm-builder", codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } hex-literal = { version = "0.4.1", optional = true } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } smallvec = "1.11.0" # Substrate diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml index 790f38d94f5..df3aaa92c79 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml index 37b7be75ef9..280ece30fb6 100644 --- a/cumulus/polkadot-parachain/Cargo.toml +++ b/cumulus/polkadot-parachain/Cargo.toml @@ -15,7 +15,7 @@ name = "polkadot-parachain" path = "src/main.rs" [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" clap = { version = "4.5.3", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.28" diff --git a/cumulus/primitives/core/Cargo.toml b/cumulus/primitives/core/Cargo.toml index 32c5054f359..62c3f675191 100644 --- a/cumulus/primitives/core/Cargo.toml +++ b/cumulus/primitives/core/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate sp-api = { path = "../../../substrate/primitives/api", default-features = false } diff --git a/cumulus/primitives/parachain-inherent/Cargo.toml b/cumulus/primitives/parachain-inherent/Cargo.toml index f434305a0ce..fcf4c93bc2f 100644 --- a/cumulus/primitives/parachain-inherent/Cargo.toml +++ b/cumulus/primitives/parachain-inherent/Cargo.toml @@ -10,9 +10,9 @@ license = "Apache-2.0" workspace = true [dependencies] -async-trait = { version = "0.1.74", optional = true } +async-trait = { version = "0.1.79", optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate sp-core = { path = "../../../substrate/primitives/core", default-features = false } diff --git a/cumulus/primitives/storage-weight-reclaim/Cargo.toml b/cumulus/primitives/storage-weight-reclaim/Cargo.toml index 54eec3ffb5e..6dbf7904bf7 100644 --- a/cumulus/primitives/storage-weight-reclaim/Cargo.toml +++ b/cumulus/primitives/storage-weight-reclaim/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = 
"3.0.0", default-features = false, features = ["derive"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../substrate/frame/system", default-features = false } diff --git a/cumulus/test/runtime/Cargo.toml b/cumulus/test/runtime/Cargo.toml index 449a8b819bc..b430b118fa1 100644 --- a/cumulus/test/runtime/Cargo.toml +++ b/cumulus/test/runtime/Cargo.toml @@ -10,7 +10,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-executive = { path = "../../../substrate/frame/executive", default-features = false } diff --git a/cumulus/test/service/Cargo.toml b/cumulus/test/service/Cargo.toml index 113e0aca68a..040fb479f6e 100644 --- a/cumulus/test/service/Cargo.toml +++ b/cumulus/test/service/Cargo.toml @@ -13,7 +13,7 @@ name = "test-parachain" path = "src/main.rs" [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" clap = { version = "4.5.3", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.0.0" } criterion = { version = "0.5.1", features = ["async_tokio"] } diff --git a/polkadot/Cargo.toml b/polkadot/Cargo.toml index 883568b23f7..659edcb041c 100644 --- a/polkadot/Cargo.toml +++ b/polkadot/Cargo.toml @@ -45,7 +45,7 @@ tikv-jemallocator = { version = "0.5.0", features = ["unprefixed_malloc_on_suppo assert_cmd = "2.0.4" nix = { version = "0.26.1", features = ["signal"] } tempfile = "3.2.0" -tokio = "1.24.2" +tokio = "1.37" substrate-rpc-client = { path = "../substrate/utils/frame/rpc/client" } polkadot-core-primitives = { path = "core-primitives" } diff --git a/polkadot/cli/Cargo.toml b/polkadot/cli/Cargo.toml index f57efa7ba43..b0c22c5a97f 100644 --- a/polkadot/cli/Cargo.toml +++ b/polkadot/cli/Cargo.toml @@ -22,7 +22,7 @@ cfg-if = "1.0" clap = { version = "4.5.3", features = ["derive"], optional = true } log = { workspace = true, default-features = true } thiserror = { workspace = true } -futures = "0.3.21" +futures = "0.3.30" pyro = { package = "pyroscope", version = "0.5.3", optional = true } pyroscope_pprofrs = { version = "0.2", optional = true } diff --git a/polkadot/core-primitives/Cargo.toml b/polkadot/core-primitives/Cargo.toml index d3aef89cb74..8dfa0b87328 100644 --- a/polkadot/core-primitives/Cargo.toml +++ b/polkadot/core-primitives/Cargo.toml @@ -13,7 +13,7 @@ workspace = true sp-core = { path = "../../substrate/primitives/core", default-features = false } sp-std = { path = "../../substrate/primitives/std", default-features = false } sp-runtime = { path = "../../substrate/primitives/runtime", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } [features] diff --git a/polkadot/node/collation-generation/Cargo.toml b/polkadot/node/collation-generation/Cargo.toml index f72af87c15e..ebc53a9e01b 100644 --- a/polkadot/node/collation-generation/Cargo.toml +++ 
b/polkadot/node/collation-generation/Cargo.toml @@ -10,7 +10,7 @@ description = "Collator-side subsystem that handles incoming candidate submissio workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" gum = { package = "tracing-gum", path = "../gum" } polkadot-erasure-coding = { path = "../../erasure-coding" } polkadot-node-primitives = { path = "../primitives" } diff --git a/polkadot/node/core/approval-voting/Cargo.toml b/polkadot/node/core/approval-voting/Cargo.toml index 2a5b6198b9a..ced7706c40a 100644 --- a/polkadot/node/core/approval-voting/Cargo.toml +++ b/polkadot/node/core/approval-voting/Cargo.toml @@ -10,7 +10,7 @@ description = "Approval Voting Subsystem of the Polkadot node" workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } gum = { package = "tracing-gum", path = "../../gum" } @@ -41,7 +41,7 @@ rand_chacha = { version = "0.3.1" } rand = "0.8.5" [dev-dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" parking_lot = "0.12.1" sp-keyring = { path = "../../../../substrate/primitives/keyring" } sp-keystore = { path = "../../../../substrate/primitives/keystore" } @@ -52,4 +52,4 @@ assert_matches = "1.4.0" kvdb-memorydb = "0.13.0" test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" } log = { workspace = true, default-features = true } -env_logger = "0.9.0" +env_logger = "0.11" diff --git a/polkadot/node/core/av-store/Cargo.toml b/polkadot/node/core/av-store/Cargo.toml index 05212da7479..bc9b979228a 100644 --- a/polkadot/node/core/av-store/Cargo.toml +++ b/polkadot/node/core/av-store/Cargo.toml @@ -10,7 +10,7 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" kvdb = "0.13.0" thiserror = { workspace = true } @@ -29,7 +29,7 @@ polkadot-node-jaeger = { path = "../../jaeger" } [dev-dependencies] log = { workspace = true, default-features = true } -env_logger = "0.9.0" +env_logger = "0.11" assert_matches = "1.4.0" kvdb-memorydb = "0.13.0" diff --git a/polkadot/node/core/backing/Cargo.toml b/polkadot/node/core/backing/Cargo.toml index d0c1f9aa483..26fa54470fb 100644 --- a/polkadot/node/core/backing/Cargo.toml +++ b/polkadot/node/core/backing/Cargo.toml @@ -10,7 +10,7 @@ description = "The Candidate Backing Subsystem. 
Tracks parachain candidates that workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" sp-keystore = { path = "../../../../substrate/primitives/keystore" } polkadot-primitives = { path = "../../../primitives" } polkadot-node-primitives = { path = "../../primitives" } @@ -30,7 +30,7 @@ sp-application-crypto = { path = "../../../../substrate/primitives/application-c sp-keyring = { path = "../../../../substrate/primitives/keyring" } sc-keystore = { path = "../../../../substrate/client/keystore" } sp-tracing = { path = "../../../../substrate/primitives/tracing" } -futures = { version = "0.3.21", features = ["thread-pool"] } +futures = { version = "0.3.30", features = ["thread-pool"] } assert_matches = "1.4.0" rstest = "0.18.2" polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } diff --git a/polkadot/node/core/bitfield-signing/Cargo.toml b/polkadot/node/core/bitfield-signing/Cargo.toml index 6ecfffd7249..0663e0f1b69 100644 --- a/polkadot/node/core/bitfield-signing/Cargo.toml +++ b/polkadot/node/core/bitfield-signing/Cargo.toml @@ -10,7 +10,7 @@ description = "Bitfield signing subsystem for the Polkadot node" workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" gum = { package = "tracing-gum", path = "../../gum" } polkadot-primitives = { path = "../../../primitives" } polkadot-node-subsystem = { path = "../../subsystem" } diff --git a/polkadot/node/core/candidate-validation/Cargo.toml b/polkadot/node/core/candidate-validation/Cargo.toml index 15fc8c940d3..0cf4707aad2 100644 --- a/polkadot/node/core/candidate-validation/Cargo.toml +++ b/polkadot/node/core/candidate-validation/Cargo.toml @@ -10,8 +10,8 @@ license.workspace = true workspace = true [dependencies] -async-trait = "0.1.74" -futures = "0.3.21" +async-trait = "0.1.79" +futures = "0.3.30" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../../gum" } @@ -31,7 +31,7 @@ polkadot-node-core-pvf = { path = "../pvf" } [dev-dependencies] sp-keyring = { path = "../../../../substrate/primitives/keyring" } -futures = { version = "0.3.21", features = ["thread-pool"] } +futures = { version = "0.3.30", features = ["thread-pool"] } assert_matches = "1.4.0" polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } sp-core = { path = "../../../../substrate/primitives/core" } diff --git a/polkadot/node/core/chain-api/Cargo.toml b/polkadot/node/core/chain-api/Cargo.toml index 9aa017ecba3..f4d02d3f47b 100644 --- a/polkadot/node/core/chain-api/Cargo.toml +++ b/polkadot/node/core/chain-api/Cargo.toml @@ -10,7 +10,7 @@ description = "The Chain API subsystem provides access to chain related utility workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" gum = { package = "tracing-gum", path = "../../gum" } polkadot-node-metrics = { path = "../../metrics" } polkadot-node-subsystem = { path = "../../subsystem" } @@ -19,7 +19,7 @@ sc-client-api = { path = "../../../../substrate/client/api" } sc-consensus-babe = { path = "../../../../substrate/client/consensus/babe" } [dev-dependencies] -futures = { version = "0.3.21", features = ["thread-pool"] } +futures = { version = "0.3.30", features = ["thread-pool"] } maplit = "1.0.2" parity-scale-codec = "3.6.1" polkadot-node-primitives = { path = "../../primitives" } diff --git a/polkadot/node/core/chain-selection/Cargo.toml b/polkadot/node/core/chain-selection/Cargo.toml index 96fd42785cd..318f27a4308 100644 --- a/polkadot/node/core/chain-selection/Cargo.toml +++ 
b/polkadot/node/core/chain-selection/Cargo.toml @@ -10,7 +10,7 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3" gum = { package = "tracing-gum", path = "../../gum" } polkadot-primitives = { path = "../../../primitives" } diff --git a/polkadot/node/core/dispute-coordinator/Cargo.toml b/polkadot/node/core/dispute-coordinator/Cargo.toml index 1fff0a77170..cd3238449be 100644 --- a/polkadot/node/core/dispute-coordinator/Cargo.toml +++ b/polkadot/node/core/dispute-coordinator/Cargo.toml @@ -10,7 +10,7 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" gum = { package = "tracing-gum", path = "../../gum" } parity-scale-codec = "3.6.1" kvdb = "0.13.0" diff --git a/polkadot/node/core/parachains-inherent/Cargo.toml b/polkadot/node/core/parachains-inherent/Cargo.toml index 24da4dc1e31..4f6090f90e9 100644 --- a/polkadot/node/core/parachains-inherent/Cargo.toml +++ b/polkadot/node/core/parachains-inherent/Cargo.toml @@ -10,11 +10,11 @@ description = "Parachains inherent data provider for Polkadot node" workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../../gum" } thiserror = { workspace = true } -async-trait = "0.1.74" +async-trait = "0.1.79" polkadot-node-subsystem = { path = "../../subsystem" } polkadot-overseer = { path = "../../overseer" } polkadot-primitives = { path = "../../../primitives" } diff --git a/polkadot/node/core/prospective-parachains/Cargo.toml b/polkadot/node/core/prospective-parachains/Cargo.toml index f66a66e859e..ab3cef99e54 100644 --- a/polkadot/node/core/prospective-parachains/Cargo.toml +++ b/polkadot/node/core/prospective-parachains/Cargo.toml @@ -10,7 +10,7 @@ description = "The Prospective Parachains subsystem. 
Tracks and handles prospect
workspace = true
[dependencies]
-futures = "0.3.19"
+futures = "0.3.30"
gum = { package = "tracing-gum", path = "../../gum" }
parity-scale-codec = "3.6.4"
thiserror = { workspace = true }
diff --git a/polkadot/node/core/provisioner/Cargo.toml b/polkadot/node/core/provisioner/Cargo.toml
index 2a09e2b5b2c..ec1a4abb3ec 100644
--- a/polkadot/node/core/provisioner/Cargo.toml
+++ b/polkadot/node/core/provisioner/Cargo.toml
@@ -11,7 +11,7 @@ workspace = true
[dependencies]
bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] }
-futures = "0.3.21"
+futures = "0.3.30"
gum = { package = "tracing-gum", path = "../../gum" }
thiserror = { workspace = true }
polkadot-primitives = { path = "../../../primitives" }
diff --git a/polkadot/node/core/pvf-checker/Cargo.toml b/polkadot/node/core/pvf-checker/Cargo.toml
index f4f954e316c..91b12b86809 100644
--- a/polkadot/node/core/pvf-checker/Cargo.toml
+++ b/polkadot/node/core/pvf-checker/Cargo.toml
@@ -10,7 +10,7 @@ license.workspace = true
workspace = true
[dependencies]
-futures = "0.3.21"
+futures = "0.3.30"
thiserror = { workspace = true }
gum = { package = "tracing-gum", path = "../../gum" }
diff --git a/polkadot/node/core/pvf/Cargo.toml b/polkadot/node/core/pvf/Cargo.toml
index 6ad36a39be6..a0233d6b751 100644
--- a/polkadot/node/core/pvf/Cargo.toml
+++ b/polkadot/node/core/pvf/Cargo.toml
@@ -14,7 +14,7 @@ always-assert = "0.1"
array-bytes = "6.1"
blake3 = "1.5"
cfg-if = "1.0"
-futures = "0.3.21"
+futures = "0.3.30"
futures-timer = "3.0.2"
gum = { package = "tracing-gum", path = "../../gum" }
is_executable = "1.0.1"
diff --git a/polkadot/node/core/pvf/common/Cargo.toml b/polkadot/node/core/pvf/common/Cargo.toml
index 56bad9792fa..f3eb9d919aa 100644
--- a/polkadot/node/core/pvf/common/Cargo.toml
+++ b/polkadot/node/core/pvf/common/Cargo.toml
@@ -12,7 +12,7 @@ workspace = true
[dependencies]
cfg-if = "1.0"
cpu-time = "1.0.0"
-futures = "0.3.21"
+futures = "0.3.30"
gum = { package = "tracing-gum", path = "../../../gum" }
libc = "0.2.152"
thiserror = { workspace = true }
diff --git a/polkadot/node/core/runtime-api/Cargo.toml b/polkadot/node/core/runtime-api/Cargo.toml
index 2de3a6ee325..91f5c35b279 100644
--- a/polkadot/node/core/runtime-api/Cargo.toml
+++ b/polkadot/node/core/runtime-api/Cargo.toml
@@ -10,7 +10,7 @@ license.workspace = true
workspace = true
[dependencies]
-futures = "0.3.21"
+futures = "0.3.30"
gum = { package = "tracing-gum", path = "../../gum" }
schnellru = "0.2.1"
@@ -25,8 +25,8 @@ polkadot-node-subsystem-types = { path = "../../subsystem-types" }
sp-api = { path = "../../../../substrate/primitives/api" }
sp-core = { path = "../../../../substrate/primitives/core" }
sp-keyring = { path = "../../../../substrate/primitives/keyring" }
-async-trait = "0.1.74"
-futures = { version = "0.3.21", features = ["thread-pool"] }
+async-trait = "0.1.79"
+futures = { version = "0.3.30", features = ["thread-pool"] }
polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" }
polkadot-node-primitives = { path = "../../primitives" }
test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" }
diff --git a/polkadot/node/jaeger/Cargo.toml b/polkadot/node/jaeger/Cargo.toml
index 23ab8f84210..6fa3d41eddb 100644
--- a/polkadot/node/jaeger/Cargo.toml
+++ b/polkadot/node/jaeger/Cargo.toml
@@ -18,6 +18,6 @@ polkadot-node-primitives = { path = "../primitives" }
sc-network = { path = "../../../substrate/client/network" }
sp-core = { path = "../../../substrate/primitives/core" }
thiserror = { workspace = true }
-tokio = "1.24.2"
+tokio = "1.37"
log = { workspace = true, default-features = true }
parity-scale-codec = { version = "3.6.1", default-features = false }
diff --git a/polkadot/node/malus/Cargo.toml b/polkadot/node/malus/Cargo.toml
index b3eb856f08e..2f63c2f0938 100644
--- a/polkadot/node/malus/Cargo.toml
+++ b/polkadot/node/malus/Cargo.toml
@@ -40,11 +40,11 @@ polkadot-node-primitives = { path = "../primitives" }
polkadot-primitives = { path = "../../primitives" }
color-eyre = { version = "0.6.1", default-features = false }
assert_matches = "1.5"
-async-trait = "0.1.74"
+async-trait = "0.1.79"
sp-keystore = { path = "../../../substrate/primitives/keystore" }
sp-core = { path = "../../../substrate/primitives/core" }
clap = { version = "4.5.3", features = ["derive"] }
-futures = "0.3.21"
+futures = "0.3.30"
futures-timer = "3.0.2"
gum = { package = "tracing-gum", path = "../gum" }
erasure = { package = "polkadot-erasure-coding", path = "../../erasure-coding" }
@@ -58,7 +58,7 @@ polkadot-node-core-pvf-prepare-worker = { path = "../core/pvf/prepare-worker" }
[dev-dependencies]
polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" }
sp-core = { path = "../../../substrate/primitives/core" }
-futures = { version = "0.3.21", features = ["thread-pool"] }
+futures = { version = "0.3.30", features = ["thread-pool"] }
[build-dependencies]
substrate-build-script-utils = { path = "../../../substrate/utils/build-script-utils" }
diff --git a/polkadot/node/metrics/Cargo.toml b/polkadot/node/metrics/Cargo.toml
index c567278f70e..fbf0abf829e 100644
--- a/polkadot/node/metrics/Cargo.toml
+++ b/polkadot/node/metrics/Cargo.toml
@@ -10,7 +10,7 @@ license.workspace = true
workspace = true
[dependencies]
-futures = "0.3.21"
+futures = "0.3.30"
futures-timer = "3.0.2"
gum = { package = "tracing-gum", path = "../gum" }
@@ -30,7 +30,7 @@ log = { workspace = true, default-features = true }
assert_cmd = "2.0.4"
tempfile = "3.2.0"
hyper = { version = "0.14.20", default-features = false, features = ["http1", "tcp"] }
-tokio = "1.24.2"
+tokio = "1.37"
polkadot-test-service = { path = "../test/service", features = ["runtime-metrics"] }
substrate-test-utils = { path = "../../../substrate/test-utils" }
sc-service = { path = "../../../substrate/client/service" }
diff --git a/polkadot/node/network/approval-distribution/Cargo.toml b/polkadot/node/network/approval-distribution/Cargo.toml
index 2bc09c5f42a..4c04ad83f84 100644
--- a/polkadot/node/network/approval-distribution/Cargo.toml
+++ b/polkadot/node/network/approval-distribution/Cargo.toml
@@ -20,7 +20,7 @@ polkadot-node-jaeger = { path = "../../jaeger" }
rand = "0.8"
itertools = "0.10.5"
-futures = "0.3.21"
+futures = "0.3.30"
futures-timer = "3.0.2"
gum = { package = "tracing-gum", path = "../../gum" }
bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] }
@@ -37,5 +37,5 @@ schnorrkel = { version = "0.11.4", default-features = false }
# rand_core should match schnorrkel
rand_core = "0.6.2"
rand_chacha = "0.3.1"
-env_logger = "0.9.0"
+env_logger = "0.11"
log = { workspace = true, default-features = true }
diff --git a/polkadot/node/network/availability-distribution/Cargo.toml b/polkadot/node/network/availability-distribution/Cargo.toml
index ac606bd377f..b5636203f16 100644
--- a/polkadot/node/network/availability-distribution/Cargo.toml
+++ b/polkadot/node/network/availability-distribution/Cargo.toml
@@ -10,7 +10,7 @@ license.workspace = true
workspace = true
[dependencies]
-futures = "0.3.21"
+futures = "0.3.30"
gum = { package = "tracing-gum", path = "../../gum" }
parity-scale-codec = { version = "3.6.1", features = ["std"] }
polkadot-primitives = { path = "../../../primitives" }
diff --git a/polkadot/node/network/availability-recovery/Cargo.toml b/polkadot/node/network/availability-recovery/Cargo.toml
index 23c4148fa85..dd0e0c43234 100644
--- a/polkadot/node/network/availability-recovery/Cargo.toml
+++ b/polkadot/node/network/availability-recovery/Cargo.toml
@@ -10,13 +10,13 @@ license.workspace = true
workspace = true
[dependencies]
-futures = "0.3.21"
-tokio = "1.24.2"
+futures = "0.3.30"
+tokio = "1.37"
schnellru = "0.2.1"
rand = "0.8.5"
fatality = "0.0.6"
thiserror = { workspace = true }
-async-trait = "0.1.74"
+async-trait = "0.1.79"
gum = { package = "tracing-gum", path = "../../gum" }
polkadot-erasure-coding = { path = "../../../erasure-coding" }
@@ -30,7 +30,7 @@ sc-network = { path = "../../../../substrate/client/network" }
[dev-dependencies]
assert_matches = "1.4.0"
-env_logger = "0.9.0"
+env_logger = "0.11"
futures-timer = "3.0.2"
log = { workspace = true, default-features = true }
diff --git a/polkadot/node/network/bitfield-distribution/Cargo.toml b/polkadot/node/network/bitfield-distribution/Cargo.toml
index 0ddb5f643b8..6b5b784b7fd 100644
--- a/polkadot/node/network/bitfield-distribution/Cargo.toml
+++ b/polkadot/node/network/bitfield-distribution/Cargo.toml
@@ -11,7 +11,7 @@ workspace = true
[dependencies]
always-assert = "0.1"
-futures = "0.3.21"
+futures = "0.3.30"
futures-timer = "3.0.2"
gum = { package = "tracing-gum", path = "../../gum" }
polkadot-primitives = { path = "../../../primitives" }
@@ -30,6 +30,6 @@ sp-keystore = { path = "../../../../substrate/primitives/keystore" }
sp-keyring = { path = "../../../../substrate/primitives/keyring" }
maplit = "1.0.2"
log = { workspace = true, default-features = true }
-env_logger = "0.9.0"
+env_logger = "0.11"
assert_matches = "1.4.0"
rand_chacha = "0.3.1"
diff --git a/polkadot/node/network/bridge/Cargo.toml b/polkadot/node/network/bridge/Cargo.toml
index 2e889fc30eb..9c2423e7e58 100644
--- a/polkadot/node/network/bridge/Cargo.toml
+++ b/polkadot/node/network/bridge/Cargo.toml
@@ -11,8 +11,8 @@ workspace = true
[dependencies]
always-assert = "0.1"
-async-trait = "0.1.74"
-futures = "0.3.21"
+async-trait = "0.1.79"
+futures = "0.3.30"
gum = { package = "tracing-gum", path = "../../gum" }
polkadot-primitives = { path = "../../../primitives" }
parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }
diff --git a/polkadot/node/network/collator-protocol/Cargo.toml b/polkadot/node/network/collator-protocol/Cargo.toml
index cfd88df958c..2c7135742f5 100644
--- a/polkadot/node/network/collator-protocol/Cargo.toml
+++ b/polkadot/node/network/collator-protocol/Cargo.toml
@@ -11,7 +11,7 @@ workspace = true
[dependencies]
bitvec = { version = "1.0.1", default-features = false, features = ["alloc"] }
-futures = "0.3.21"
+futures = "0.3.30"
futures-timer = "3"
gum = { package = "tracing-gum", path = "../../gum" }
@@ -30,7 +30,7 @@ tokio-util = "0.7.1"
[dev-dependencies]
log = { workspace = true, default-features = true }
-env_logger = "0.9.0"
+env_logger = "0.11"
assert_matches = "1.4.0"
sp-core = { path = "../../../../substrate/primitives/core", features = ["std"] }
diff --git a/polkadot/node/network/dispute-distribution/Cargo.toml b/polkadot/node/network/dispute-distribution/Cargo.toml
index 14d59d04f2b..ff9c302c731 100644
--- a/polkadot/node/network/dispute-distribution/Cargo.toml
+++ b/polkadot/node/network/dispute-distribution/Cargo.toml
@@ -10,7 +10,7 @@ license.workspace = true
workspace = true
[dependencies]
-futures = "0.3.21"
+futures = "0.3.30"
futures-timer = "3.0.2"
gum = { package = "tracing-gum", path = "../../gum" }
derive_more = "0.99.17"
@@ -31,7 +31,7 @@ indexmap = "2.0.0"
[dev-dependencies]
async-channel = "1.8.0"
-async-trait = "0.1.74"
+async-trait = "0.1.79"
polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" }
sp-keyring = { path = "../../../../substrate/primitives/keyring" }
sp-tracing = { path = "../../../../substrate/primitives/tracing" }
diff --git a/polkadot/node/network/gossip-support/Cargo.toml b/polkadot/node/network/gossip-support/Cargo.toml
index 8d0edc206d7..2d6f2f954c6 100644
--- a/polkadot/node/network/gossip-support/Cargo.toml
+++ b/polkadot/node/network/gossip-support/Cargo.toml
@@ -22,7 +22,7 @@ polkadot-node-subsystem = { path = "../../subsystem" }
polkadot-node-subsystem-util = { path = "../../subsystem-util" }
polkadot-primitives = { path = "../../../primitives" }
-futures = "0.3.21"
+futures = "0.3.30"
futures-timer = "3.0.2"
rand = { version = "0.8.5", default-features = false }
rand_chacha = { version = "0.3.1", default-features = false }
@@ -37,7 +37,7 @@ sp-authority-discovery = { path = "../../../../substrate/primitives/authority-di
polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" }
assert_matches = "1.4.0"
-async-trait = "0.1.74"
+async-trait = "0.1.79"
parking_lot = "0.12.1"
lazy_static = "1.4.0"
quickcheck = "1.0.3"
diff --git a/polkadot/node/network/protocol/Cargo.toml b/polkadot/node/network/protocol/Cargo.toml
index 7efa0b8ca82..81936364897 100644
--- a/polkadot/node/network/protocol/Cargo.toml
+++ b/polkadot/node/network/protocol/Cargo.toml
@@ -11,7 +11,7 @@ workspace = true
[dependencies]
async-channel = "1.8.0"
-async-trait = "0.1.74"
+async-trait = "0.1.79"
hex = "0.4.3"
polkadot-primitives = { path = "../../../primitives" }
polkadot-node-primitives = { path = "../../primitives" }
@@ -19,8 +19,8 @@ polkadot-node-jaeger = { path = "../../jaeger" }
parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }
sc-network = { path = "../../../../substrate/client/network" }
sc-authority-discovery = { path = "../../../../substrate/client/authority-discovery" }
-strum = { version = "0.24", features = ["derive"] }
-futures = "0.3.21"
+strum = { version = "0.26.2", features = ["derive"] }
+futures = "0.3.30"
thiserror = { workspace = true }
fatality = "0.0.6"
rand = "0.8"
diff --git a/polkadot/node/network/statement-distribution/Cargo.toml b/polkadot/node/network/statement-distribution/Cargo.toml
index 01f7d818c1c..d8ae031cbf3 100644
--- a/polkadot/node/network/statement-distribution/Cargo.toml
+++ b/polkadot/node/network/statement-distribution/Cargo.toml
@@ -10,7 +10,7 @@ license.workspace = true
workspace = true
[dependencies]
-futures = "0.3.21"
+futures = "0.3.30"
futures-timer = "3.0.2"
gum = { package = "tracing-gum", path = "../../gum" }
polkadot-primitives = { path = "../../../primitives" }
diff --git a/polkadot/node/overseer/Cargo.toml b/polkadot/node/overseer/Cargo.toml
index f91ec80d944..ef79cfe2f70 100644
--- a/polkadot/node/overseer/Cargo.toml
+++ b/polkadot/node/overseer/Cargo.toml
@@ -12,7 +12,7 @@ workspace = true
[dependencies]
client = { package = "sc-client-api", path = "../../../substrate/client/api" }
sp-api = { path = "../../../substrate/primitives/api" }
-futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" parking_lot = "0.12.1" polkadot-node-network-protocol = { path = "../network/protocol" } @@ -23,13 +23,13 @@ polkadot-primitives = { path = "../../primitives" } orchestra = { version = "0.3.5", default-features = false, features = ["futures_channel"] } gum = { package = "tracing-gum", path = "../gum" } sp-core = { path = "../../../substrate/primitives/core" } -async-trait = "0.1.74" +async-trait = "0.1.79" tikv-jemalloc-ctl = { version = "0.5.0", optional = true } [dev-dependencies] metered = { package = "prioritized-metered-channel", version = "0.6.1", default-features = false, features = ["futures_channel"] } sp-core = { path = "../../../substrate/primitives/core" } -futures = { version = "0.3.21", features = ["thread-pool"] } +futures = { version = "0.3.30", features = ["thread-pool"] } femme = "2.2.1" assert_matches = "1.4.0" test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../primitives/test-helpers" } diff --git a/polkadot/node/primitives/Cargo.toml b/polkadot/node/primitives/Cargo.toml index b4541bcc346..a4bbd824e67 100644 --- a/polkadot/node/primitives/Cargo.toml +++ b/polkadot/node/primitives/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] bounded-vec = "0.7" -futures = "0.3.21" +futures = "0.3.30" polkadot-primitives = { path = "../../primitives" } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } sp-core = { path = "../../../substrate/primitives/core" } diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index 932f3e679f4..9688ab55647 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -77,8 +77,8 @@ frame-benchmarking-cli = { path = "../../../substrate/utils/frame/benchmarking-c frame-benchmarking = { path = "../../../substrate/frame/benchmarking" } # External Crates -async-trait = "0.1.74" -futures = "0.3.21" +async-trait = "0.1.79" +futures = "0.3.30" hex-literal = "0.4.1" is_executable = "1.0.1" gum = { package = "tracing-gum", path = "../gum" } @@ -148,7 +148,7 @@ xcm-fee-payment-runtime-api = { path = "../../xcm/xcm-fee-payment-runtime-api" } polkadot-test-client = { path = "../test/client" } polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../primitives/test-helpers" } -env_logger = "0.9.0" +env_logger = "0.11" assert_matches = "1.5.0" serial_test = "2.0.0" tempfile = "3.2" diff --git a/polkadot/node/subsystem-bench/Cargo.toml b/polkadot/node/subsystem-bench/Cargo.toml index b494f05180d..37224d110e8 100644 --- a/polkadot/node/subsystem-bench/Cargo.toml +++ b/polkadot/node/subsystem-bench/Cargo.toml @@ -35,12 +35,12 @@ color-eyre = { version = "0.6.1", default-features = false } polkadot-overseer = { path = "../overseer" } colored = "2.0.4" assert_matches = "1.5" -async-trait = "0.1.57" +async-trait = "0.1.79" sp-keystore = { path = "../../../substrate/primitives/keystore" } sc-keystore = { path = "../../../substrate/client/keystore" } sp-core = { path = "../../../substrate/primitives/core" } clap = { version = "4.5.3", features = ["derive"] } -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" bincode = "1.3.3" sha1 = "0.10.6" @@ -48,7 +48,7 @@ hex = "0.4.3" gum = { package = "tracing-gum", path = "../gum" } polkadot-erasure-coding = { package = "polkadot-erasure-coding", path = "../../erasure-coding" } log = { workspace = true, default-features = true } 
-env_logger = "0.9.0" +env_logger = "0.11" rand = "0.8.5" # `rand` only supports uniform distribution, we need normal distribution for latency. rand_distr = "0.4.3" diff --git a/polkadot/node/subsystem-test-helpers/Cargo.toml b/polkadot/node/subsystem-test-helpers/Cargo.toml index c71f030568d..57678e8e8d4 100644 --- a/polkadot/node/subsystem-test-helpers/Cargo.toml +++ b/polkadot/node/subsystem-test-helpers/Cargo.toml @@ -11,8 +11,8 @@ license.workspace = true workspace = true [dependencies] -async-trait = "0.1.74" -futures = "0.3.21" +async-trait = "0.1.79" +futures = "0.3.30" parking_lot = "0.12.1" polkadot-node-subsystem = { path = "../subsystem" } polkadot-erasure-coding = { path = "../../erasure-coding" } diff --git a/polkadot/node/subsystem-types/Cargo.toml b/polkadot/node/subsystem-types/Cargo.toml index 54c8f7e2ade..10190776387 100644 --- a/polkadot/node/subsystem-types/Cargo.toml +++ b/polkadot/node/subsystem-types/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] derive_more = "0.99.17" -futures = "0.3.21" +futures = "0.3.30" polkadot-primitives = { path = "../../primitives" } polkadot-node-primitives = { path = "../primitives" } polkadot-node-network-protocol = { path = "../network/protocol" } @@ -29,5 +29,5 @@ sc-transaction-pool-api = { path = "../../../substrate/client/transaction-pool/a smallvec = "1.8.0" substrate-prometheus-endpoint = { path = "../../../substrate/utils/prometheus" } thiserror = { workspace = true } -async-trait = "0.1.74" +async-trait = "0.1.79" bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } diff --git a/polkadot/node/subsystem-util/Cargo.toml b/polkadot/node/subsystem-util/Cargo.toml index a668f8de76a..79a6a75e4cd 100644 --- a/polkadot/node/subsystem-util/Cargo.toml +++ b/polkadot/node/subsystem-util/Cargo.toml @@ -10,8 +10,8 @@ license.workspace = true workspace = true [dependencies] -async-trait = "0.1.74" -futures = "0.3.21" +async-trait = "0.1.79" +futures = "0.3.30" futures-channel = "0.3.23" itertools = "0.10" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } @@ -44,8 +44,8 @@ parity-db = { version = "0.4.12" } [dev-dependencies] assert_matches = "1.4.0" -env_logger = "0.9.0" -futures = { version = "0.3.21", features = ["thread-pool"] } +env_logger = "0.11" +futures = { version = "0.3.30", features = ["thread-pool"] } log = { workspace = true, default-features = true } polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } lazy_static = "1.4.0" diff --git a/polkadot/node/test/client/Cargo.toml b/polkadot/node/test/client/Cargo.toml index 36748c3b455..7db00404eb8 100644 --- a/polkadot/node/test/client/Cargo.toml +++ b/polkadot/node/test/client/Cargo.toml @@ -38,7 +38,7 @@ frame-benchmarking = { path = "../../../../substrate/frame/benchmarking" } [dev-dependencies] sp-keyring = { path = "../../../../substrate/primitives/keyring" } -futures = "0.3.21" +futures = "0.3.30" [features] runtime-benchmarks = [ diff --git a/polkadot/node/test/service/Cargo.toml b/polkadot/node/test/service/Cargo.toml index e7892abcd87..48a206f23c6 100644 --- a/polkadot/node/test/service/Cargo.toml +++ b/polkadot/node/test/service/Cargo.toml @@ -10,13 +10,13 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" hex = "0.4.3" gum = { package = "tracing-gum", path = "../../gum" } rand = "0.8.5" serde_json = { workspace = true, default-features = true } tempfile = "3.2.0" -tokio = "1.24.2" +tokio = "1.37" # Polkadot dependencies 
polkadot-overseer = { path = "../../overseer" }
@@ -63,7 +63,7 @@ substrate-test-client = { path = "../../../../substrate/test-utils/client" }
[dev-dependencies]
pallet-balances = { path = "../../../../substrate/frame/balances", default-features = false }
substrate-test-utils = { path = "../../../../substrate/test-utils" }
-tokio = { version = "1.24.2", features = ["macros"] }
+tokio = { version = "1.37", features = ["macros"] }
[features]
runtime-metrics = ["polkadot-test-runtime/runtime-metrics"]
diff --git a/polkadot/parachain/Cargo.toml b/polkadot/parachain/Cargo.toml
index d8c3cea7ad8..15eea2addc8 100644
--- a/polkadot/parachain/Cargo.toml
+++ b/polkadot/parachain/Cargo.toml
@@ -14,7 +14,7 @@ workspace = true
# this crate for WASM. This is critical to avoid forcing all parachain WASM into implementing
# various unnecessary Substrate-specific endpoints.
parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] }
sp-std = { path = "../../substrate/primitives/std", default-features = false }
sp-runtime = { path = "../../substrate/primitives/runtime", default-features = false, features = ["serde"] }
sp-core = { path = "../../substrate/primitives/core", default-features = false, features = ["serde"] }
diff --git a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml
index 30bce806f9f..5a2b5405741 100644
--- a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml
+++ b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml
@@ -17,7 +17,7 @@ path = "src/main.rs"
[dependencies]
parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }
clap = { version = "4.5.3", features = ["derive"] }
-futures = "0.3.21"
+futures = "0.3.30"
futures-timer = "3.0.2"
log = { workspace = true, default-features = true }
diff --git a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml
index bede10a7673..cacf7304f90 100644
--- a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml
+++ b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml
@@ -17,7 +17,7 @@ path = "src/main.rs"
[dependencies]
parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }
clap = { version = "4.5.3", features = ["derive"] }
-futures = "0.3.21"
+futures = "0.3.30"
futures-timer = "3.0.2"
log = { workspace = true, default-features = true }
diff --git a/polkadot/primitives/Cargo.toml b/polkadot/primitives/Cargo.toml
index e63fb621c78..004fa62acf3 100644
--- a/polkadot/primitives/Cargo.toml
+++ b/polkadot/primitives/Cargo.toml
@@ -13,7 +13,7 @@ workspace = true
bitvec = { version = "1.0.0", default-features = false, features = ["alloc", "serde"] }
hex-literal = "0.4.1"
parity-scale-codec = { version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] }
-scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive", "serde"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive", "serde"] }
log = { workspace = true, default-features = false }
serde = { features = ["alloc", "derive"], workspace = true }
diff --git a/polkadot/runtime/common/Cargo.toml b/polkadot/runtime/common/Cargo.toml
index eae5d4fb2ef..4219a7e7b0d 100644
--- a/polkadot/runtime/common/Cargo.toml
+++ b/polkadot/runtime/common/Cargo.toml
@@ -15,7 +15,7 @@ bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] }
parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }
log = { workspace = true }
rustc-hex = { version = "2.1.0", default-features = false }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
serde = { features = ["alloc"], workspace = true }
serde_derive = { workspace = true }
static_assertions = "1.1.0"
diff --git a/polkadot/runtime/parachains/Cargo.toml b/polkadot/runtime/parachains/Cargo.toml
index 6e693b83ae1..dff8549f29f 100644
--- a/polkadot/runtime/parachains/Cargo.toml
+++ b/polkadot/runtime/parachains/Cargo.toml
@@ -15,7 +15,7 @@ bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] }
parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] }
log = { workspace = true }
rustc-hex = { version = "2.1.0", default-features = false }
-scale-info = { version = "2.11.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
serde = { features = ["alloc", "derive"], workspace = true }
derive_more = "0.99.17"
bitflags = "1.3.2"
@@ -59,7 +59,7 @@ polkadot-runtime-metrics = { path = "../metrics", default-features = false }
polkadot-core-primitives = { path = "../../core-primitives", default-features = false }
[dev-dependencies]
-futures = "0.3.21"
+futures = "0.3.30"
hex-literal = "0.4.1"
keyring = { package = "sp-keyring", path = "../../../substrate/primitives/keyring" }
frame-support-test = { path = "../../../substrate/frame/support/test" }
diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml
index ff178b17070..19cc984e582 100644
--- a/polkadot/runtime/rococo/Cargo.toml
+++ b/polkadot/runtime/rococo/Cargo.toml
@@ -12,7 +12,7 @@ workspace = true
[dependencies]
parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
log = { workspace = true }
serde = { workspace = true }
serde_derive = { optional = true, workspace = true }
diff --git a/polkadot/runtime/test-runtime/Cargo.toml b/polkadot/runtime/test-runtime/Cargo.toml
index 9753a409304..35fb684597e 100644
--- a/polkadot/runtime/test-runtime/Cargo.toml
+++ b/polkadot/runtime/test-runtime/Cargo.toml
@@ -15,7 +15,7 @@ bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] }
parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }
log = { workspace = true }
rustc-hex = { version = "2.1.0", default-features = false }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
serde = { workspace = true }
serde_derive = { optional = true, workspace = true }
smallvec = "1.8.0"
diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml
index 4c27d4f6d1f..d726adfb8e6 100644
--- a/polkadot/runtime/westend/Cargo.toml
+++ b/polkadot/runtime/westend/Cargo.toml
@@ -13,7 +13,7 @@ workspace = true
[dependencies]
bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] }
parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
log = { workspace = true }
rustc-hex = { version = "2.1.0", default-features = false }
serde = { workspace = true }
diff --git a/polkadot/xcm/Cargo.toml b/polkadot/xcm/Cargo.toml
index f9ccfb9833a..b214342d2f4 100644
--- a/polkadot/xcm/Cargo.toml
+++ b/polkadot/xcm/Cargo.toml
@@ -16,7 +16,7 @@ derivative = { version = "2.2.0", default-features = false, features = ["use_cor
impl-trait-for-tuples = "0.2.2"
log = { workspace = true }
parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] }
sp-weights = { path = "../../substrate/primitives/weights", default-features = false, features = ["serde"] }
serde = { features = ["alloc", "derive", "rc"], workspace = true }
schemars = { version = "0.8.13", default-features = true, optional = true }
diff --git a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml
index 80f2d1deedf..8c71426a6fa 100644
--- a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml
+++ b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml
@@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
frame-support = { path = "../../../substrate/frame/support", default-features = false }
frame-system = { path = "../../../substrate/frame/system", default-features = false }
sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false }
diff --git a/polkadot/xcm/pallet-xcm/Cargo.toml b/polkadot/xcm/pallet-xcm/Cargo.toml
index 08307c34f8a..460597e6649 100644
--- a/polkadot/xcm/pallet-xcm/Cargo.toml
+++ b/polkadot/xcm/pallet-xcm/Cargo.toml
@@ -12,7 +12,7 @@ workspace = true
[dependencies]
bounded-collections = { version = "0.2.0", default-features = false }
codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
serde = { optional = true, features = ["derive"], workspace = true, default-features = true }
log = { workspace = true }
diff --git a/polkadot/xcm/xcm-builder/Cargo.toml b/polkadot/xcm/xcm-builder/Cargo.toml
index 10726b0f511..997ca99fb12 100644
--- a/polkadot/xcm/xcm-builder/Cargo.toml
+++ b/polkadot/xcm/xcm-builder/Cargo.toml
@@ -12,7 +12,7 @@ workspace = true
[dependencies]
impl-trait-for-tuples = "0.2.1"
parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
xcm = { package = "staging-xcm", path = "..", default-features = false }
xcm-executor = { package = "staging-xcm-executor", path = "../xcm-executor", default-features = false }
sp-std = { path = "../../../substrate/primitives/std", default-features = false }
diff --git a/polkadot/xcm/xcm-executor/Cargo.toml b/polkadot/xcm/xcm-executor/Cargo.toml
index 71bd58073db..aebc768bb90 100644
--- a/polkadot/xcm/xcm-executor/Cargo.toml
+++ b/polkadot/xcm/xcm-executor/Cargo.toml
@@ -13,7 +13,7 @@ workspace = true
impl-trait-for-tuples = "0.2.2"
environmental = { version = "1.1.4", default-features = false }
parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }
-scale-info = { version = "2.5.0", default-features = false, features = ["derive", "serde"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] }
xcm = { package = "staging-xcm", path = "..", default-features = false }
sp-std = { path = "../../../substrate/primitives/std", default-features = false }
sp-io = { path = "../../../substrate/primitives/io", default-features = false }
diff --git a/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml b/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml
index 1e572e6210a..9c9c53f0ee1 100644
--- a/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml
+++ b/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml
@@ -14,7 +14,7 @@ workspace = true
codec = { package = "parity-scale-codec", version = "3.6.1" }
frame-support = { path = "../../../../substrate/frame/support", default-features = false }
frame-system = { path = "../../../../substrate/frame/system" }
-futures = "0.3.21"
+futures = "0.3.30"
pallet-transaction-payment = { path = "../../../../substrate/frame/transaction-payment" }
pallet-xcm = { path = "../../pallet-xcm" }
polkadot-test-client = { path = "../../../node/test/client" }
diff --git a/polkadot/xcm/xcm-fee-payment-runtime-api/Cargo.toml b/polkadot/xcm/xcm-fee-payment-runtime-api/Cargo.toml
index 682642d13c3..30c7c0bac14 100644
--- a/polkadot/xcm/xcm-fee-payment-runtime-api/Cargo.toml
+++ b/polkadot/xcm/xcm-fee-payment-runtime-api/Cargo.toml
@@ -16,7 +16,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features =
] }
sp-api = { path = "../../../substrate/primitives/api", default-features = false }
-scale-info = { version = "2.10.0", default-features = false, features = [
+scale-info = { version = "2.11.1", default-features = false, features = [
"derive",
"serde",
] }
diff --git a/polkadot/xcm/xcm-simulator/example/Cargo.toml b/polkadot/xcm/xcm-simulator/example/Cargo.toml
index af471df60ab..0e13a10a141 100644
--- a/polkadot/xcm/xcm-simulator/example/Cargo.toml
+++ b/polkadot/xcm/xcm-simulator/example/Cargo.toml
@@ -11,7 +11,7 @@ workspace = true
[dependencies]
codec = { package = "parity-scale-codec", version = "3.6.1" }
-scale-info = { version = "2.10.0", features = ["derive"] }
+scale-info = { version = "2.11.1", features = ["derive"] }
log = { workspace = true }
frame-system = { path = "../../../../substrate/frame/system" }
diff --git a/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml b/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml
index 30644dc0e0a..ca794a07bfb 100644
--- a/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml
+++ b/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml
@@ -14,7 +14,7 @@ workspace = true
codec = { package = "parity-scale-codec", version = "3.6.1" }
honggfuzz = "0.5.55"
arbitrary = "1.3.2"
-scale-info = { version = "2.10.0", features = ["derive"] }
+scale-info = { version = "2.11.1", features = ["derive"] }
frame-system = { path = "../../../../substrate/frame/system" }
frame-support = { path = "../../../../substrate/frame/support" }
diff --git a/substrate/bin/node/bench/Cargo.toml b/substrate/bin/node/bench/Cargo.toml
index d5de1dbe6c4..49485fe2a1b 100644
--- a/substrate/bin/node/bench/Cargo.toml
+++ b/substrate/bin/node/bench/Cargo.toml
@@ -44,4 +44,4 @@ lazy_static = "1.4.0"
parity-db = "0.4.12"
sc-transaction-pool = { path = "../../../client/transaction-pool" }
sc-transaction-pool-api = { path = "../../../client/transaction-pool/api" }
-futures = { version = "0.3.21", features = ["thread-pool"] }
+futures = { version = "0.3.30", features = ["thread-pool"] }
diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml
index 8bddbbe0482..6346063b9d2 100644
--- a/substrate/bin/node/cli/Cargo.toml
+++ b/substrate/bin/node/cli/Cargo.toml
@@ -45,7 +45,7 @@ clap = { version = "4.5.3", features = ["derive"], optional = true }
codec = { package = "parity-scale-codec", version = "3.6.1" }
serde = { features = ["derive"], workspace = true, default-features = true }
jsonrpsee = { version = "0.22", features = ["server"] }
-futures = "0.3.21"
+futures = "0.3.30"
log = { workspace = true, default-features = true }
rand = "0.8"
@@ -129,7 +129,7 @@ sc-block-builder = { path = "../../../client/block-builder" }
sp-tracing = { path = "../../../primitives/tracing" }
sp-blockchain = { path = "../../../primitives/blockchain" }
sp-crypto-hashing = { path = "../../../primitives/crypto/hashing" }
-futures = "0.3.21"
+futures = "0.3.30"
tempfile = "3.1.0"
assert_cmd = "2.0.2"
nix = { version = "0.26.1", features = ["signal"] }
@@ -160,7 +160,7 @@ sp-externalities = { path = "../../../primitives/externalities" }
sp-keyring = { path = "../../../primitives/keyring" }
sp-runtime = { path = "../../../primitives/runtime" }
serde_json = { workspace = true, default-features = true }
-scale-info = { version = "2.10.0", features = ["derive", "serde"] }
+scale-info = { version = "2.11.1", features = ["derive", "serde"] }
sp-trie = { path = "../../../primitives/trie" }
sp-state-machine = { path = "../../../primitives/state-machine" }
diff --git a/substrate/bin/node/runtime/Cargo.toml b/substrate/bin/node/runtime/Cargo.toml
index 4d342ceb460..8f68b1d3e2f 100644
--- a/substrate/bin/node/runtime/Cargo.toml
+++ b/substrate/bin/node/runtime/Cargo.toml
@@ -23,7 +23,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features =
"derive",
"max-encoded-len",
] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] }
static_assertions = "1.1.0"
log = { workspace = true }
serde_json = { features = ["alloc", "arbitrary_precision"], workspace = true }
diff --git a/substrate/bin/node/testing/Cargo.toml b/substrate/bin/node/testing/Cargo.toml
index 31f8689d46c..fa3f90193ba 100644
--- a/substrate/bin/node/testing/Cargo.toml
+++ b/substrate/bin/node/testing/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
codec = { package = "parity-scale-codec", version = "3.6.1" }
fs_extra = "1"
-futures = "0.3.21"
+futures = "0.3.30"
log = { workspace = true, default-features = true }
tempfile = "3.1.0"
frame-system = { path = "../../../frame/system" }
diff --git a/substrate/client/api/Cargo.toml b/substrate/client/api/Cargo.toml
index cd7b613e277..fb650c5b532 100644
--- a/substrate/client/api/Cargo.toml
+++ b/substrate/client/api/Cargo.toml
@@ -21,7 +21,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features =
"derive",
] }
fnv = "1.0.6"
-futures = "0.3.21"
+futures = "0.3.30"
log = { workspace = true, default-features = true }
parking_lot = "0.12.1"
prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" }
diff --git a/substrate/client/authority-discovery/Cargo.toml b/substrate/client/authority-discovery/Cargo.toml
index 26580064b3c..dbd9ba0131a 100644
--- a/substrate/client/authority-discovery/Cargo.toml
+++ b/substrate/client/authority-discovery/Cargo.toml
@@ -21,7 +21,7 @@ prost-build = "0.11"
[dependencies]
codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
-futures = "0.3.21"
+futures = "0.3.30"
futures-timer = "3.0.1"
ip_network = "0.4.1"
libp2p = { version = "0.51.4", features = ["ed25519", "kad"] }
@@ -43,7 +43,7 @@ sp-blockchain = { path = "../../primitives/blockchain" }
sp-core = { path = "../../primitives/core" }
sp-keystore = { path = "../../primitives/keystore" }
sp-runtime = { path = "../../primitives/runtime" }
-async-trait = "0.1.74"
+async-trait = "0.1.79"
multihash-codetable = { version = "0.1.1", features = [
"digest",
"serde",
diff --git a/substrate/client/basic-authorship/Cargo.toml b/substrate/client/basic-authorship/Cargo.toml
index 51a06464d0d..4890b66c9b2 100644
--- a/substrate/client/basic-authorship/Cargo.toml
+++ b/substrate/client/basic-authorship/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
codec = { package = "parity-scale-codec", version = "3.6.1" }
-futures = "0.3.21"
+futures = "0.3.30"
futures-timer = "3.0.1"
log = { workspace = true, default-features = true }
prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" }
diff --git a/substrate/client/cli/Cargo.toml b/substrate/client/cli/Cargo.toml
index 47b29555e66..805d3ee117f 100644
--- a/substrate/client/cli/Cargo.toml
+++ b/substrate/client/cli/Cargo.toml
@@ -20,7 +20,7 @@ array-bytes = "6.1"
chrono = "0.4.31"
clap = { version = "4.5.3", features = ["derive", "string", "wrap_help"] }
fdlimit = "0.3.0"
-futures = "0.3.21"
+futures = "0.3.30"
itertools = "0.10.3"
libp2p-identity = { version = "0.1.3", features = ["ed25519", "peerid"] }
log = { workspace = true, default-features = true }
diff --git a/substrate/client/consensus/aura/Cargo.toml b/substrate/client/consensus/aura/Cargo.toml
index 213f75974da..64e2d16cd91 100644
--- a/substrate/client/consensus/aura/Cargo.toml
+++ b/substrate/client/consensus/aura/Cargo.toml
@@ -16,9 +16,9 @@ workspace = true
targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
-async-trait = "0.1.74"
+async-trait = "0.1.79"
codec = { package = "parity-scale-codec", version = "3.6.1" }
-futures = "0.3.21"
+futures = "0.3.30"
log = { workspace = true, default-features = true }
thiserror = { workspace = true }
prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" }
diff --git a/substrate/client/consensus/babe/Cargo.toml b/substrate/client/consensus/babe/Cargo.toml
index c98fb7112b7..b001e3d117a 100644
--- a/substrate/client/consensus/babe/Cargo.toml
+++ b/substrate/client/consensus/babe/Cargo.toml
@@ -17,9 +17,9 @@ workspace = true
targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
-async-trait = "0.1.74"
+async-trait = "0.1.79"
codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] }
-futures = "0.3.21"
+futures = "0.3.30"
log = { workspace = true, default-features = true }
num-bigint = "0.4.3"
num-rational = "0.4.1"
@@ -54,4 +54,4 @@ sc-network-test = { path = "../../network/test" }
sp-timestamp = { path = "../../../primitives/timestamp" }
sp-tracing = { path = "../../../primitives/tracing" }
substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" }
-tokio = "1.22.0"
+tokio = "1.37"
diff --git a/substrate/client/consensus/babe/rpc/Cargo.toml b/substrate/client/consensus/babe/rpc/Cargo.toml
index 043b566673e..b2661bbde27 100644
--- a/substrate/client/consensus/babe/rpc/Cargo.toml
+++ b/substrate/client/consensus/babe/rpc/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] }
-futures = "0.3.21"
+futures = "0.3.30"
serde = { features = ["derive"], workspace = true, default-features = true }
thiserror = { workspace = true }
sc-consensus-babe = { path = ".." }
@@ -34,7 +34,7 @@ sp-runtime = { path = "../../../../primitives/runtime" }
[dev-dependencies]
serde_json = { workspace = true, default-features = true }
-tokio = "1.22.0"
+tokio = "1.37"
sc-consensus = { path = "../../common" }
sc-keystore = { path = "../../../keystore" }
sc-transaction-pool-api = { path = "../../../transaction-pool/api" }
diff --git a/substrate/client/consensus/beefy/Cargo.toml b/substrate/client/consensus/beefy/Cargo.toml
index 8552a490022..c1d57baa394 100644
--- a/substrate/client/consensus/beefy/Cargo.toml
+++ b/substrate/client/consensus/beefy/Cargo.toml
@@ -14,10 +14,10 @@ workspace = true
[dependencies]
array-bytes = "6.1"
async-channel = "1.8.0"
-async-trait = "0.1.74"
+async-trait = "0.1.79"
codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] }
fnv = "1.0.6"
-futures = "0.3"
+futures = "0.3.30"
log = { workspace = true, default-features = true }
parking_lot = "0.12.1"
thiserror = { workspace = true }
@@ -40,7 +40,7 @@ sp-crypto-hashing = { path = "../../../primitives/crypto/hashing" }
sp-keystore = { path = "../../../primitives/keystore" }
sp-mmr-primitives = { path = "../../../primitives/merkle-mountain-range" }
sp-runtime = { path = "../../../primitives/runtime" }
-tokio = "1.22.0"
+tokio = "1.37"
[dev-dependencies]
diff --git a/substrate/client/consensus/beefy/rpc/Cargo.toml b/substrate/client/consensus/beefy/rpc/Cargo.toml
index bb2ae4a0896..e46fc4f4410 100644
--- a/substrate/client/consensus/beefy/rpc/Cargo.toml
+++ b/substrate/client/consensus/beefy/rpc/Cargo.toml
@@ -13,7 +13,7 @@ workspace = true
[dependencies]
codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] }
-futures = "0.3.21"
+futures = "0.3.30"
jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] }
log = { workspace = true, default-features = true }
parking_lot = "0.12.1"
diff --git a/substrate/client/consensus/common/Cargo.toml b/substrate/client/consensus/common/Cargo.toml
index f691e84717d..b2738a1d12d 100644
--- a/substrate/client/consensus/common/Cargo.toml
+++ b/substrate/client/consensus/common/Cargo.toml
@@ -16,8 +16,8 @@ workspace = true
targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
-async-trait = "0.1.74"
-futures = { version = "0.3.21", features = ["thread-pool"] }
+async-trait = "0.1.79"
+futures = { version = "0.3.30", features = ["thread-pool"] }
futures-timer = "3.0.1"
libp2p-identity = { version = "0.1.3", features = ["ed25519", "peerid"] }
log = { workspace = true, default-features = true }
diff --git a/substrate/client/consensus/grandpa/Cargo.toml b/substrate/client/consensus/grandpa/Cargo.toml
index 1ab95352522..797b4ea35b2 100644
--- a/substrate/client/consensus/grandpa/Cargo.toml
+++ b/substrate/client/consensus/grandpa/Cargo.toml
@@ -19,10 +19,10 @@ targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
ahash = "0.8.2"
array-bytes = "6.1"
-async-trait = "0.1.74"
+async-trait = "0.1.79"
dyn-clone = "1.0"
finality-grandpa = { version = "0.16.2", features = ["derive-codec"] }
-futures = "0.3.21"
+futures = "0.3.30"
futures-timer = "3.0.1"
log = { workspace = true, default-features = true }
parity-scale-codec = { version = "3.6.1", features = ["derive"] }
@@ -58,7 +58,7 @@ sp-runtime = { path = "../../../primitives/runtime" }
assert_matches = "1.3.0"
finality-grandpa = { version = "0.16.2", features = ["derive-codec", "test-helpers"] }
serde = { workspace = true, default-features = true }
-tokio = "1.22.0"
+tokio = "1.37"
sc-network = { path = "../../network" }
sc-network-test = { path = "../../network/test" }
sp-keyring = { path = "../../../primitives/keyring" }
diff --git a/substrate/client/consensus/grandpa/rpc/Cargo.toml b/substrate/client/consensus/grandpa/rpc/Cargo.toml
index f7e87415448..0789a429ac4 100644
--- a/substrate/client/consensus/grandpa/rpc/Cargo.toml
+++ b/substrate/client/consensus/grandpa/rpc/Cargo.toml
@@ -14,7 +14,7 @@ workspace = true
[dependencies]
finality-grandpa = { version = "0.16.2", features = ["derive-codec"] }
-futures = "0.3.16"
+futures = "0.3.30"
jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] }
log = { workspace = true, default-features = true }
parity-scale-codec = { version = "3.6.1", features = ["derive"] }
diff --git a/substrate/client/consensus/manual-seal/Cargo.toml b/substrate/client/consensus/manual-seal/Cargo.toml
index ac32fed7228..1422d46105b 100644
--- a/substrate/client/consensus/manual-seal/Cargo.toml
+++ b/substrate/client/consensus/manual-seal/Cargo.toml
@@ -18,9 +18,9 @@ targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] }
assert_matches = "1.3.0"
-async-trait = "0.1.74"
+async-trait = "0.1.79"
codec = { package = "parity-scale-codec", version = "3.6.1" }
-futures = "0.3.21"
+futures = "0.3.30"
futures-timer = "3.0.1"
log = { workspace = true, default-features = true }
serde = { features = ["derive"], workspace = true, default-features = true }
diff --git a/substrate/client/consensus/pow/Cargo.toml b/substrate/client/consensus/pow/Cargo.toml
index 0791514035b..ecfa29aa194 100644
--- a/substrate/client/consensus/pow/Cargo.toml
+++ b/substrate/client/consensus/pow/Cargo.toml
@@ -16,9 +16,9 @@ workspace = true
targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
-async-trait = "0.1.74"
+async-trait = "0.1.79"
codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] }
-futures = "0.3.21"
+futures = "0.3.30"
futures-timer = "3.0.1"
log = { workspace = true, default-features = true }
parking_lot = "0.12.1"
diff --git a/substrate/client/consensus/slots/Cargo.toml b/substrate/client/consensus/slots/Cargo.toml
index 75f8b29a2fd..4ac6ce90713 100644
--- a/substrate/client/consensus/slots/Cargo.toml
+++ b/substrate/client/consensus/slots/Cargo.toml
@@ -17,9 +17,9 @@ workspace = true
targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
-async-trait = "0.1.74"
+async-trait = "0.1.79"
codec = { package = "parity-scale-codec", version = "3.6.1" }
-futures = "0.3.21"
+futures = "0.3.30"
futures-timer = "3.0.1"
log = { workspace = true, default-features = true }
sc-client-api = { path = "../../api" }
diff --git a/substrate/client/executor/Cargo.toml b/substrate/client/executor/Cargo.toml
index 7fad7e3a2a0..cb0befe9871 100644
--- a/substrate/client/executor/Cargo.toml
+++ b/substrate/client/executor/Cargo.toml
@@ -51,7 +51,7 @@ tracing-subscriber = "0.2.19"
paste = "1.0"
regex = "1.6.0"
criterion = "0.4.0"
-env_logger = "0.9"
+env_logger = "0.11"
num_cpus = "1.13.1"
tempfile = "3.3.0"
diff --git a/substrate/client/informant/Cargo.toml b/substrate/client/informant/Cargo.toml
index bd15e94ebaf..191ef5f19f8 100644
--- a/substrate/client/informant/Cargo.toml
+++ b/substrate/client/informant/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
ansi_term = "0.12.1"
-futures = "0.3.21"
+futures = "0.3.30"
futures-timer = "3.0.1"
log = { workspace = true, default-features = true }
sc-client-api = { path = "../api" }
diff --git a/substrate/client/merkle-mountain-range/Cargo.toml b/substrate/client/merkle-mountain-range/Cargo.toml
index 60232bccb0e..46b7a1011c4 100644
--- a/substrate/client/merkle-mountain-range/Cargo.toml
+++ b/substrate/client/merkle-mountain-range/Cargo.toml
@@ -15,7 +15,7 @@ workspace = true
[dependencies]
codec = { package = "parity-scale-codec", version = "3.6.1" }
-futures = "0.3"
+futures = "0.3.30"
log = { workspace = true, default-features = true }
sp-api = { path = "../../primitives/api" }
sp-blockchain = { path = "../../primitives/blockchain" }
@@ -32,4 +32,4 @@ parking_lot = "0.12.1"
sc-block-builder = { path = "../block-builder" }
sp-tracing = { path = "../../primitives/tracing" }
substrate-test-runtime-client = { path = "../../test-utils/runtime/client" }
-tokio = "1.17.0"
+tokio = "1.37"
diff --git a/substrate/client/mixnet/Cargo.toml b/substrate/client/mixnet/Cargo.toml
index 736184f4668..3beeae9f9b1 100644
--- a/substrate/client/mixnet/Cargo.toml
+++ b/substrate/client/mixnet/Cargo.toml
@@ -21,7 +21,7 @@ arrayvec = "0.7.2"
blake2 = "0.10.4"
bytes = "1"
codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
-futures = "0.3.25"
+futures = "0.3.30"
futures-timer = "3.0.2"
libp2p-identity = { version = "0.1.3", features = ["peerid"] }
log = { workspace = true, default-features = true }
diff --git a/substrate/client/network-gossip/Cargo.toml b/substrate/client/network-gossip/Cargo.toml
index a14761c0d6e..346e6bd6a5c 100644
--- a/substrate/client/network-gossip/Cargo.toml
+++ b/substrate/client/network-gossip/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
ahash = "0.8.2"
-futures = "0.3.21"
+futures = "0.3.30"
futures-timer = "3.0.1"
libp2p = "0.51.4"
log = { workspace = true, default-features = true }
@@ -31,8 +31,8 @@ sc-network-sync = { path = "../network/sync" }
sp-runtime = { path = "../../primitives/runtime" }
[dev-dependencies]
-tokio = "1.22.0"
-async-trait = "0.1.74"
+tokio = "1.37"
+async-trait = "0.1.79"
codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] }
quickcheck = { version = "1.0.3", default-features = false }
substrate-test-runtime-client = { path = "../../test-utils/runtime/client" }
diff --git a/substrate/client/network/Cargo.toml b/substrate/client/network/Cargo.toml
index c6f17647166..a891336d241 100644
--- a/substrate/client/network/Cargo.toml
+++ b/substrate/client/network/Cargo.toml
@@ -19,13 +19,13 @@ targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
array-bytes = "6.1"
async-channel = "1.8.0"
-async-trait = "0.1"
+async-trait = "0.1.79"
asynchronous-codec = "0.6" bytes = "1" codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } either = "1.5.3" fnv = "1.0.6" -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" ip_network = "0.4.1" libp2p = { version = "0.51.4", features = ["dns", "identify", "kad", "macros", "mdns", "noise", "ping", "request-response", "tcp", "tokio", "websocket", "yamux"] } diff --git a/substrate/client/network/bitswap/Cargo.toml b/substrate/client/network/bitswap/Cargo.toml index 7ef3ea21242..587e2e70867 100644 --- a/substrate/client/network/bitswap/Cargo.toml +++ b/substrate/client/network/bitswap/Cargo.toml @@ -21,7 +21,7 @@ prost-build = "0.11" [dependencies] async-channel = "1.8.0" cid = "0.9.0" -futures = "0.3.21" +futures = "0.3.30" libp2p-identity = { version = "0.1.3", features = ["peerid"] } log = { workspace = true, default-features = true } prost = "0.12" diff --git a/substrate/client/network/common/Cargo.toml b/substrate/client/network/common/Cargo.toml index 7a6d904b74b..f9248b0bb51 100644 --- a/substrate/client/network/common/Cargo.toml +++ b/substrate/client/network/common/Cargo.toml @@ -19,12 +19,12 @@ targets = ["x86_64-unknown-linux-gnu"] prost-build = "0.11" [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" bitflags = "1.3.2" codec = { package = "parity-scale-codec", version = "3.6.1", features = [ "derive", ] } -futures = "0.3.21" +futures = "0.3.30" libp2p-identity = { version = "0.1.3", features = ["peerid"] } sc-consensus = { path = "../../consensus/common" } sp-consensus = { path = "../../../primitives/consensus/common" } diff --git a/substrate/client/network/light/Cargo.toml b/substrate/client/network/light/Cargo.toml index c757f727fb7..2628fd07d3e 100644 --- a/substrate/client/network/light/Cargo.toml +++ b/substrate/client/network/light/Cargo.toml @@ -24,7 +24,7 @@ array-bytes = "6.1" codec = { package = "parity-scale-codec", version = "3.6.1", features = [ "derive", ] } -futures = "0.3.21" +futures = "0.3.30" libp2p-identity = { version = "0.1.3", features = ["peerid"] } log = { workspace = true, default-features = true } prost = "0.12" diff --git a/substrate/client/network/statement/Cargo.toml b/substrate/client/network/statement/Cargo.toml index b6efee5d9d3..635cfc5d0d5 100644 --- a/substrate/client/network/statement/Cargo.toml +++ b/substrate/client/network/statement/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] array-bytes = "6.1" async-channel = "1.8.0" codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } -futures = "0.3.21" +futures = "0.3.30" libp2p = "0.51.4" log = { workspace = true, default-features = true } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } diff --git a/substrate/client/network/sync/Cargo.toml b/substrate/client/network/sync/Cargo.toml index 32ba3b6356c..6b46d67a3ca 100644 --- a/substrate/client/network/sync/Cargo.toml +++ b/substrate/client/network/sync/Cargo.toml @@ -21,9 +21,9 @@ prost-build = "0.11" [dependencies] array-bytes = "6.1" async-channel = "1.8.0" -async-trait = "0.1.74" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" libp2p = "0.51.4" log = { workspace = true, default-features = true } diff --git a/substrate/client/network/test/Cargo.toml b/substrate/client/network/test/Cargo.toml index 4f57287a39c..56fc89e1b2b 100644 --- 
a/substrate/client/network/test/Cargo.toml +++ b/substrate/client/network/test/Cargo.toml @@ -16,9 +16,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -tokio = "1.22.0" -async-trait = "0.1.74" -futures = "0.3.21" +tokio = "1.37" +async-trait = "0.1.79" +futures = "0.3.30" futures-timer = "3.0.1" libp2p = "0.51.4" log = { workspace = true, default-features = true } diff --git a/substrate/client/network/transactions/Cargo.toml b/substrate/client/network/transactions/Cargo.toml index 01c8ac8814d..0ab7386ef21 100644 --- a/substrate/client/network/transactions/Cargo.toml +++ b/substrate/client/network/transactions/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = "6.1" codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } -futures = "0.3.21" +futures = "0.3.30" libp2p = "0.51.4" log = { workspace = true, default-features = true } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } diff --git a/substrate/client/offchain/Cargo.toml b/substrate/client/offchain/Cargo.toml index caa4bb03f40..a3a3cfaa8fc 100644 --- a/substrate/client/offchain/Cargo.toml +++ b/substrate/client/offchain/Cargo.toml @@ -20,7 +20,7 @@ array-bytes = "6.1" bytes = "1.1" codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } fnv = "1.0.6" -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" hyper = { version = "0.14.16", features = ["http2", "stream"] } hyper-rustls = { version = "0.24.0", features = ["http2"] } @@ -46,7 +46,7 @@ log = { workspace = true, default-features = true } [dev-dependencies] lazy_static = "1.4.0" -tokio = "1.22.0" +tokio = "1.37" sc-block-builder = { path = "../block-builder" } sc-client-db = { path = "../db", default-features = true } sc-transaction-pool = { path = "../transaction-pool" } diff --git a/substrate/client/rpc-api/Cargo.toml b/substrate/client/rpc-api/Cargo.toml index 1b7af6a4a52..169714d2245 100644 --- a/substrate/client/rpc-api/Cargo.toml +++ b/substrate/client/rpc-api/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } thiserror = { workspace = true } diff --git a/substrate/client/rpc-servers/Cargo.toml b/substrate/client/rpc-servers/Cargo.toml index 3adc81c57d5..bc21b5b1582 100644 --- a/substrate/client/rpc-servers/Cargo.toml +++ b/substrate/client/rpc-servers/Cargo.toml @@ -25,5 +25,5 @@ tower-http = { version = "0.4.0", features = ["cors"] } tower = { version = "0.4.13", features = ["util"] } http = "0.2.8" hyper = "0.14.27" -futures = "0.3.29" +futures = "0.3.30" governor = "0.6.0" diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml index 937e5c6b626..e2612d91454 100644 --- a/substrate/client/rpc-spec-v2/Cargo.toml +++ b/substrate/client/rpc-spec-v2/Cargo.toml @@ -34,7 +34,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1" } thiserror = { workspace = true } serde = { workspace = true, default-features = true } hex = "0.4" -futures = "0.3.21" +futures = "0.3.30" parking_lot = "0.12.1" tokio-stream = { version = "0.1.14", features = 
["sync"] } tokio = { version = "1.22.0", features = ["sync"] } diff --git a/substrate/client/rpc/Cargo.toml b/substrate/client/rpc/Cargo.toml index f65e6c9a59e..dff34215b02 100644 --- a/substrate/client/rpc/Cargo.toml +++ b/substrate/client/rpc/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } -futures = "0.3.21" +futures = "0.3.30" jsonrpsee = { version = "0.22", features = ["server"] } log = { workspace = true, default-features = true } parking_lot = "0.12.1" @@ -40,10 +40,10 @@ sp-runtime = { path = "../../primitives/runtime" } sp-session = { path = "../../primitives/session" } sp-version = { path = "../../primitives/version" } sp-statement-store = { path = "../../primitives/statement-store" } -tokio = "1.22.0" +tokio = "1.37" [dev-dependencies] -env_logger = "0.9" +env_logger = "0.11" assert_matches = "1.3.0" sc-block-builder = { path = "../block-builder" } sc-network = { path = "../network" } @@ -51,7 +51,7 @@ sc-network-common = { path = "../network/common" } sc-transaction-pool = { path = "../transaction-pool" } sp-consensus = { path = "../../primitives/consensus/common" } sp-crypto-hashing = { path = "../../primitives/crypto/hashing" } -tokio = "1.22.0" +tokio = "1.37" sp-io = { path = "../../primitives/io" } substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } pretty_assertions = "1.2.1" diff --git a/substrate/client/service/Cargo.toml b/substrate/client/service/Cargo.toml index bbf67d1fbd0..b81f2e2f55a 100644 --- a/substrate/client/service/Cargo.toml +++ b/substrate/client/service/Cargo.toml @@ -30,7 +30,7 @@ runtime-benchmarks = [ [dependencies] jsonrpsee = { version = "0.22", features = ["server"] } thiserror = { workspace = true } -futures = "0.3.21" +futures = "0.3.30" rand = "0.8.5" parking_lot = "0.12.1" log = { workspace = true, default-features = true } @@ -79,7 +79,7 @@ sc-tracing = { path = "../tracing" } sc-sysinfo = { path = "../sysinfo" } tracing = "0.1.29" tracing-futures = { version = "0.2.4" } -async-trait = "0.1.74" +async-trait = "0.1.79" tokio = { version = "1.22.0", features = ["parking_lot", "rt-multi-thread", "time"] } tempfile = "3.1.0" directories = "5.0.1" diff --git a/substrate/client/service/test/Cargo.toml b/substrate/client/service/test/Cargo.toml index ee7e60f6011..2de984689ba 100644 --- a/substrate/client/service/test/Cargo.toml +++ b/substrate/client/service/test/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] async-channel = "1.8.0" array-bytes = "6.1" fdlimit = "0.3.0" -futures = "0.3.21" +futures = "0.3.30" log = { workspace = true, default-features = true } parity-scale-codec = "3.6.1" parking_lot = "0.12.1" diff --git a/substrate/client/statement-store/Cargo.toml b/substrate/client/statement-store/Cargo.toml index 676f6cb36f6..8ca6d11dbe0 100644 --- a/substrate/client/statement-store/Cargo.toml +++ b/substrate/client/statement-store/Cargo.toml @@ -31,4 +31,4 @@ sc-keystore = { path = "../keystore" } [dev-dependencies] tempfile = "3.1.0" -env_logger = "0.9" +env_logger = "0.11" diff --git a/substrate/client/sysinfo/Cargo.toml b/substrate/client/sysinfo/Cargo.toml index ba58452ffb5..32b7755c64b 100644 --- a/substrate/client/sysinfo/Cargo.toml +++ b/substrate/client/sysinfo/Cargo.toml @@ -17,7 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -futures = "0.3.19" +futures = "0.3.30" libc = "0.2" log = { workspace = true, default-features = true } rand = "0.8.5" diff --git 
a/substrate/client/telemetry/Cargo.toml b/substrate/client/telemetry/Cargo.toml index 8ab00202f0b..9a29a33a591 100644 --- a/substrate/client/telemetry/Cargo.toml +++ b/substrate/client/telemetry/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] chrono = "0.4.31" -futures = "0.3.21" +futures = "0.3.30" libp2p = { version = "0.51.4", features = ["dns", "tcp", "tokio", "wasm-ext", "websocket"] } log = { workspace = true, default-features = true } parking_lot = "0.12.1" diff --git a/substrate/client/transaction-pool/Cargo.toml b/substrate/client/transaction-pool/Cargo.toml index 2ca37afd61b..e2a0b87eaab 100644 --- a/substrate/client/transaction-pool/Cargo.toml +++ b/substrate/client/transaction-pool/Cargo.toml @@ -16,9 +16,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.6.1" } -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" linked-hash-map = "0.5.4" log = { workspace = true, default-features = true } diff --git a/substrate/client/transaction-pool/api/Cargo.toml b/substrate/client/transaction-pool/api/Cargo.toml index d52e4783fab..1bb72ef5544 100644 --- a/substrate/client/transaction-pool/api/Cargo.toml +++ b/substrate/client/transaction-pool/api/Cargo.toml @@ -12,9 +12,9 @@ description = "Transaction pool client facing API." workspace = true [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.6.1" } -futures = "0.3.21" +futures = "0.3.30" log = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } thiserror = { workspace = true } diff --git a/substrate/client/utils/Cargo.toml b/substrate/client/utils/Cargo.toml index 7f604219bc0..a101f4b3f3a 100644 --- a/substrate/client/utils/Cargo.toml +++ b/substrate/client/utils/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] async-channel = "1.8.0" -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" lazy_static = "1.4.0" log = { workspace = true, default-features = true } diff --git a/substrate/frame/Cargo.toml b/substrate/frame/Cargo.toml index ab394592071..27001ee5afd 100644 --- a/substrate/frame/Cargo.toml +++ b/substrate/frame/Cargo.toml @@ -22,7 +22,7 @@ targets = ["x86_64-unknown-linux-gnu"] parity-scale-codec = { version = "3.2.2", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.6.0", default-features = false, features = [ +scale-info = { version = "2.11.1", default-features = false, features = [ "derive", ] } diff --git a/substrate/frame/alliance/Cargo.toml b/substrate/frame/alliance/Cargo.toml index bc873ad69c8..af1fcb296f6 100644 --- a/substrate/frame/alliance/Cargo.toml +++ b/substrate/frame/alliance/Cargo.toml @@ -20,7 +20,7 @@ array-bytes = { version = "6.1", optional = true } log = { workspace = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-std = { path = "../../primitives/std", default-features = false } sp-core = { path = "../../primitives/core", default-features = false } diff --git a/substrate/frame/asset-conversion/Cargo.toml b/substrate/frame/asset-conversion/Cargo.toml index 1f2db14dac2..1a8e8eea484 100644 --- 
a/substrate/frame/asset-conversion/Cargo.toml +++ b/substrate/frame/asset-conversion/Cargo.toml @@ -20,7 +20,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-api = { path = "../../primitives/api", default-features = false } sp-core = { path = "../../primitives/core", default-features = false } sp-io = { path = "../../primitives/io", default-features = false } diff --git a/substrate/frame/asset-rate/Cargo.toml b/substrate/frame/asset-rate/Cargo.toml index 6e7bbf29fc4..cd502148a8d 100644 --- a/substrate/frame/asset-rate/Cargo.toml +++ b/substrate/frame/asset-rate/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/assets/Cargo.toml b/substrate/frame/assets/Cargo.toml index 2efc96348cb..3b95750c14c 100644 --- a/substrate/frame/assets/Cargo.toml +++ b/substrate/frame/assets/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-std = { path = "../../primitives/std", default-features = false } # Needed for various traits. In our case, `OnFinalize`. 
 sp-runtime = { path = "../../primitives/runtime", default-features = false }
diff --git a/substrate/frame/atomic-swap/Cargo.toml b/substrate/frame/atomic-swap/Cargo.toml
index 0283af1d1b0..c641071df90 100644
--- a/substrate/frame/atomic-swap/Cargo.toml
+++ b/substrate/frame/atomic-swap/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
 sp-core = { path = "../../primitives/core", default-features = false }
diff --git a/substrate/frame/aura/Cargo.toml b/substrate/frame/aura/Cargo.toml
index de698487efa..97349107f1f 100644
--- a/substrate/frame/aura/Cargo.toml
+++ b/substrate/frame/aura/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
 pallet-timestamp = { path = "../timestamp", default-features = false }
diff --git a/substrate/frame/authority-discovery/Cargo.toml b/substrate/frame/authority-discovery/Cargo.toml
index 0922007e57e..a7aba711a56 100644
--- a/substrate/frame/authority-discovery/Cargo.toml
+++ b/substrate/frame/authority-discovery/Cargo.toml
@@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
 	"derive",
 ] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
 pallet-session = { path = "../session", default-features = false, features = [
diff --git a/substrate/frame/authorship/Cargo.toml b/substrate/frame/authorship/Cargo.toml
index 4b318f12519..2bfd59a48e1 100644
--- a/substrate/frame/authorship/Cargo.toml
+++ b/substrate/frame/authorship/Cargo.toml
@@ -20,7 +20,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features =
 	"derive",
 ] }
 impl-trait-for-tuples = "0.2.2"
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
 sp-runtime = { path = "../../primitives/runtime", default-features = false }
diff --git a/substrate/frame/babe/Cargo.toml b/substrate/frame/babe/Cargo.toml
index fc7385efa1f..9f6ef2bc05e 100644
--- a/substrate/frame/babe/Cargo.toml
+++ b/substrate/frame/babe/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/bags-list/Cargo.toml b/substrate/frame/bags-list/Cargo.toml
index 49d28482c32..5deb504d0a4 100644
--- a/substrate/frame/bags-list/Cargo.toml
+++ b/substrate/frame/bags-list/Cargo.toml
@@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
 	"derive",
 ] }
-scale-info = { version = "2.10.0", default-features = false, features = [
+scale-info = { version = "2.11.1", default-features = false, features = [
 	"derive",
 ] }
 
diff --git a/substrate/frame/balances/Cargo.toml b/substrate/frame/balances/Cargo.toml
index b27a5bb2478..28eabdaf506 100644
--- a/substrate/frame/balances/Cargo.toml
+++ b/substrate/frame/balances/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/beefy-mmr/Cargo.toml b/substrate/frame/beefy-mmr/Cargo.toml
index 17707731773..8fcb8e1d559 100644
--- a/substrate/frame/beefy-mmr/Cargo.toml
+++ b/substrate/frame/beefy-mmr/Cargo.toml
@@ -15,7 +15,7 @@ workspace = true
 array-bytes = { version = "6.1", optional = true }
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 serde = { optional = true, workspace = true, default-features = true }
 binary-merkle-tree = { path = "../../utils/binary-merkle-tree", default-features = false }
 frame-support = { path = "../support", default-features = false }
diff --git a/substrate/frame/beefy/Cargo.toml b/substrate/frame/beefy/Cargo.toml
index e38eaa6fb07..f181f4d41cd 100644
--- a/substrate/frame/beefy/Cargo.toml
+++ b/substrate/frame/beefy/Cargo.toml
@@ -14,7 +14,7 @@ workspace = true
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] }
 serde = { optional = true, workspace = true, default-features = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/benchmarking/Cargo.toml b/substrate/frame/benchmarking/Cargo.toml
index bf42aae979c..8210e8cfa62 100644
--- a/substrate/frame/benchmarking/Cargo.toml
+++ b/substrate/frame/benchmarking/Cargo.toml
@@ -20,7 +20,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features =
 linregress = { version = "0.5.1", optional = true }
 log = { workspace = true }
 paste = "1.0"
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 serde = { optional = true, workspace = true, default-features = true }
 frame-support = { path = "../support", default-features = false }
 frame-support-procedural = { path = "../support/procedural", default-features = false }
diff --git a/substrate/frame/benchmarking/pov/Cargo.toml b/substrate/frame/benchmarking/pov/Cargo.toml
index ce5daeb5b7b..5d3aaa78904 100644
--- a/substrate/frame/benchmarking/pov/Cargo.toml
+++ b/substrate/frame/benchmarking/pov/Cargo.toml
@@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "..", default-features = false }
 frame-support = { path = "../../support", default-features = false }
 frame-system = { path = "../../system", default-features = false }
diff --git a/substrate/frame/bounties/Cargo.toml b/substrate/frame/bounties/Cargo.toml
index 191a38d20b2..3307e47e981 100644
--- a/substrate/frame/bounties/Cargo.toml
+++ b/substrate/frame/bounties/Cargo.toml
@@ -20,7 +20,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features =
 	"derive",
 ] }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/broker/Cargo.toml b/substrate/frame/broker/Cargo.toml
index 31f9a6b6317..3b6bd2019cc 100644
--- a/substrate/frame/broker/Cargo.toml
+++ b/substrate/frame/broker/Cargo.toml
@@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 bitvec = { version = "1.0.0", default-features = false }
 sp-std = { path = "../../primitives/std", default-features = false }
 sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false }
diff --git a/substrate/frame/child-bounties/Cargo.toml b/substrate/frame/child-bounties/Cargo.toml
index 589ca95a751..14a5e25e13d 100644
--- a/substrate/frame/child-bounties/Cargo.toml
+++ b/substrate/frame/child-bounties/Cargo.toml
@@ -20,7 +20,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features =
 	"derive",
 ] }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/collective/Cargo.toml b/substrate/frame/collective/Cargo.toml
index e19e1496e7b..850390409ab 100644
--- a/substrate/frame/collective/Cargo.toml
+++ b/substrate/frame/collective/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/contracts/Cargo.toml b/substrate/frame/contracts/Cargo.toml
index 2aa37a2bf21..d963ac261d1 100644
--- a/substrate/frame/contracts/Cargo.toml
+++ b/substrate/frame/contracts/Cargo.toml
@@ -24,7 +24,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features =
 	"derive",
 	"max-encoded-len",
 ] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 log = { workspace = true }
 serde = { optional = true, features = ["derive"], workspace = true, default-features = true }
 smallvec = { version = "1", default-features = false, features = [
@@ -58,7 +58,7 @@ xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/x
 [dev-dependencies]
 array-bytes = "6.1"
 assert_matches = "1"
-env_logger = "0.9"
+env_logger = "0.11"
 pretty_assertions = "1"
 wat = "1"
 pallet-contracts-fixtures = { path = "./fixtures" }
diff --git a/substrate/frame/contracts/mock-network/Cargo.toml b/substrate/frame/contracts/mock-network/Cargo.toml
index 0c4cd1356f4..387c3ca39d0 100644
--- a/substrate/frame/contracts/mock-network/Cargo.toml
+++ b/substrate/frame/contracts/mock-network/Cargo.toml
@@ -30,7 +30,7 @@ pallet-xcm = { path = "../../../../polkadot/xcm/pallet-xcm", default-features =
 polkadot-parachain-primitives = { path = "../../../../polkadot/parachain" }
 polkadot-primitives = { path = "../../../../polkadot/primitives" }
 polkadot-runtime-parachains = { path = "../../../../polkadot/runtime/parachains" }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 sp-api = { path = "../../../primitives/api", default-features = false }
 sp-core = { path = "../../../primitives/core", default-features = false }
 sp-io = { path = "../../../primitives/io", default-features = false }
diff --git a/substrate/frame/contracts/uapi/Cargo.toml b/substrate/frame/contracts/uapi/Cargo.toml
index 12bb6b8fc2c..d9a5ee14f05 100644
--- a/substrate/frame/contracts/uapi/Cargo.toml
+++ b/substrate/frame/contracts/uapi/Cargo.toml
@@ -14,7 +14,7 @@ workspace = true
 [dependencies]
 paste = { version = "1.0", default-features = false }
 bitflags = "1.0"
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"], optional = true }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"], optional = true }
 scale = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
 	"derive",
 	"max-encoded-len",
diff --git a/substrate/frame/conviction-voting/Cargo.toml b/substrate/frame/conviction-voting/Cargo.toml
index ff5af995026..ffb5122ed7f 100644
--- a/substrate/frame/conviction-voting/Cargo.toml
+++ b/substrate/frame/conviction-voting/Cargo.toml
@@ -21,7 +21,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features =
 	"derive",
 	"max-encoded-len",
 ] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 serde = { features = ["derive"], optional = true, workspace = true, default-features = true }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
diff --git a/substrate/frame/core-fellowship/Cargo.toml b/substrate/frame/core-fellowship/Cargo.toml
index 3e678d32744..b4258281b70 100644
--- a/substrate/frame/core-fellowship/Cargo.toml
+++ b/substrate/frame/core-fellowship/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/democracy/Cargo.toml b/substrate/frame/democracy/Cargo.toml
index 9a55cda5340..edd2d742b50 100644
--- a/substrate/frame/democracy/Cargo.toml
+++ b/substrate/frame/democracy/Cargo.toml
@@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
 	"derive",
 ] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 serde = { features = ["derive"], optional = true, workspace = true, default-features = true }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
diff --git a/substrate/frame/election-provider-multi-phase/Cargo.toml b/substrate/frame/election-provider-multi-phase/Cargo.toml
index eadce8c1ff8..2074b51f50f 100644
--- a/substrate/frame/election-provider-multi-phase/Cargo.toml
+++ b/substrate/frame/election-provider-multi-phase/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
 	"derive",
 ] }
-scale-info = { version = "2.10.0", default-features = false, features = [
+scale-info = { version = "2.11.1", default-features = false, features = [
 	"derive",
 ] }
 log = { workspace = true }
@@ -38,7 +38,7 @@ frame-election-provider-support = { path = "../election-provider-support", defau
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 pallet-election-provider-support-benchmarking = { path = "../election-provider-support/benchmarking", default-features = false, optional = true }
 rand = { version = "0.8.5", default-features = false, features = ["alloc", "small_rng"], optional = true }
-strum = { version = "0.24.1", default-features = false, features = ["derive"], optional = true }
+strum = { version = "0.26.2", default-features = false, features = ["derive"], optional = true }
 
 [dev-dependencies]
 parking_lot = "0.12.1"
diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml b/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml
index e6384450a6f..25c280921f8 100644
--- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml
+++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dev-dependencies]
 parking_lot = "0.12.1"
 codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] }
-scale-info = { version = "2.10.0", features = ["derive"] }
+scale-info = { version = "2.11.1", features = ["derive"] }
 log = { workspace = true }
 
 sp-runtime = { path = "../../../primitives/runtime" }
diff --git a/substrate/frame/election-provider-support/Cargo.toml b/substrate/frame/election-provider-support/Cargo.toml
index b182b831ea0..0d9748ee34e 100644
--- a/substrate/frame/election-provider-support/Cargo.toml
+++ b/substrate/frame/election-provider-support/Cargo.toml
@@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-election-provider-solution-type = { path = "solution-type" }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/election-provider-support/solution-type/Cargo.toml b/substrate/frame/election-provider-support/solution-type/Cargo.toml
index 1bf1165229a..09c6a492dd0 100644
--- a/substrate/frame/election-provider-support/solution-type/Cargo.toml
+++ b/substrate/frame/election-provider-support/solution-type/Cargo.toml
@@ -25,7 +25,7 @@ proc-macro-crate = "3.0.0"
 
 [dev-dependencies]
 parity-scale-codec = "3.6.1"
-scale-info = "2.10.0"
+scale-info = "2.11.1"
 sp-arithmetic = { path = "../../../primitives/arithmetic" }
 # used by generate_solution_type:
 frame-election-provider-support = { path = ".." }
diff --git a/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml b/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml
index a27f0f7d4dd..1fb9e2387ed 100644
--- a/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml
+++ b/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml
@@ -21,7 +21,7 @@ honggfuzz = "0.5"
 rand = { version = "0.8", features = ["small_rng", "std"] }
 
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-election-provider-solution-type = { path = ".." }
 frame-election-provider-support = { path = "../.." }
 sp-arithmetic = { path = "../../../../primitives/arithmetic" }
diff --git a/substrate/frame/elections-phragmen/Cargo.toml b/substrate/frame/elections-phragmen/Cargo.toml
index 4dc4a3454aa..81dc48476a0 100644
--- a/substrate/frame/elections-phragmen/Cargo.toml
+++ b/substrate/frame/elections-phragmen/Cargo.toml
@@ -20,7 +20,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features =
 	"derive",
 ] }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/examples/basic/Cargo.toml b/substrate/frame/examples/basic/Cargo.toml
index e4ab5112201..43b37c6beba 100644
--- a/substrate/frame/examples/basic/Cargo.toml
+++ b/substrate/frame/examples/basic/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../../support", default-features = false }
 frame-system = { path = "../../system", default-features = false }
diff --git a/substrate/frame/examples/default-config/Cargo.toml b/substrate/frame/examples/default-config/Cargo.toml
index e40845a425a..2aa062ee6c1 100644
--- a/substrate/frame/examples/default-config/Cargo.toml
+++ b/substrate/frame/examples/default-config/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-support = { path = "../../support", default-features = false }
 frame-system = { path = "../../system", default-features = false }
 
diff --git a/substrate/frame/examples/dev-mode/Cargo.toml b/substrate/frame/examples/dev-mode/Cargo.toml
index a9c4e3f3b1f..71b97796ecd 100644
--- a/substrate/frame/examples/dev-mode/Cargo.toml
+++ b/substrate/frame/examples/dev-mode/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-support = { path = "../../support", default-features = false }
 frame-system = { path = "../../system", default-features = false }
 pallet-balances = { path = "../../balances", default-features = false }
diff --git a/substrate/frame/examples/frame-crate/Cargo.toml b/substrate/frame/examples/frame-crate/Cargo.toml
index 93a46ba7b24..76bfd65282a 100644
--- a/substrate/frame/examples/frame-crate/Cargo.toml
+++ b/substrate/frame/examples/frame-crate/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
-scale-info = { version = "2.5.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 
 frame = { path = "../..", default-features = false, features = ["experimental", "runtime"] }
 
diff --git a/substrate/frame/examples/kitchensink/Cargo.toml b/substrate/frame/examples/kitchensink/Cargo.toml
index 37384107530..d8311897c6e 100644
--- a/substrate/frame/examples/kitchensink/Cargo.toml
+++ b/substrate/frame/examples/kitchensink/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 
 frame-support = { path = "../../support", default-features = false, features = ["experimental"] }
 frame-system = { path = "../../system", default-features = false }
diff --git a/substrate/frame/examples/offchain-worker/Cargo.toml b/substrate/frame/examples/offchain-worker/Cargo.toml
index fc5151ff292..468af0345ca 100644
--- a/substrate/frame/examples/offchain-worker/Cargo.toml
+++ b/substrate/frame/examples/offchain-worker/Cargo.toml
@@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
 lite-json = { version = "0.2.0", default-features = false }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-support = { path = "../../support", default-features = false }
 frame-system = { path = "../../system", default-features = false }
 sp-core = { path = "../../../primitives/core", default-features = false }
diff --git a/substrate/frame/examples/single-block-migrations/Cargo.toml b/substrate/frame/examples/single-block-migrations/Cargo.toml
index 1020cc9e2bb..b1d560a85f3 100644
--- a/substrate/frame/examples/single-block-migrations/Cargo.toml
+++ b/substrate/frame/examples/single-block-migrations/Cargo.toml
@@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 docify = "0.2.8"
 log = { version = "0.4.21", default-features = false }
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
-scale-info = { version = "2.5.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-support = { path = "../../support", default-features = false }
 frame-executive = { path = "../../executive", default-features = false }
 frame-system = { path = "../../system", default-features = false }
diff --git a/substrate/frame/examples/split/Cargo.toml b/substrate/frame/examples/split/Cargo.toml
index 230dc980b1a..1ef3521e060 100644
--- a/substrate/frame/examples/split/Cargo.toml
+++ b/substrate/frame/examples/split/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-support = { path = "../../support", default-features = false }
 frame-system = { path = "../../system", default-features = false }
 
diff --git a/substrate/frame/examples/tasks/Cargo.toml b/substrate/frame/examples/tasks/Cargo.toml
index 4d14bf313d7..3f59d57ea0f 100644
--- a/substrate/frame/examples/tasks/Cargo.toml
+++ b/substrate/frame/examples/tasks/Cargo.toml
@@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-support = { path = "../../support", default-features = false }
 frame-system = { path = "../../system", default-features = false }
 
diff --git a/substrate/frame/executive/Cargo.toml b/substrate/frame/executive/Cargo.toml
index 63285e4cb49..95de7c3f3d1 100644
--- a/substrate/frame/executive/Cargo.toml
+++ b/substrate/frame/executive/Cargo.toml
@@ -21,7 +21,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features =
 	"derive",
 ] }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
 frame-try-runtime = { path = "../try-runtime", default-features = false, optional = true }
diff --git a/substrate/frame/fast-unstake/Cargo.toml b/substrate/frame/fast-unstake/Cargo.toml
index fb425dc310d..f05f22f7641 100644
--- a/substrate/frame/fast-unstake/Cargo.toml
+++ b/substrate/frame/fast-unstake/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/glutton/Cargo.toml b/substrate/frame/glutton/Cargo.toml
index 7de18080b87..5ce010f1c26 100644
--- a/substrate/frame/glutton/Cargo.toml
+++ b/substrate/frame/glutton/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 blake2 = { version = "0.10.4", default-features = false }
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 log = { workspace = true }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
diff --git a/substrate/frame/grandpa/Cargo.toml b/substrate/frame/grandpa/Cargo.toml
index db540564fbe..f4dd92129f3 100644
--- a/substrate/frame/grandpa/Cargo.toml
+++ b/substrate/frame/grandpa/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/identity/Cargo.toml b/substrate/frame/identity/Cargo.toml
index 912444bf603..8c0052004ae 100644
--- a/substrate/frame/identity/Cargo.toml
+++ b/substrate/frame/identity/Cargo.toml
@@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] }
 enumflags2 = { version = "0.7.7" }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/im-online/Cargo.toml b/substrate/frame/im-online/Cargo.toml
index 038cbbcd678..46b416f0f9a 100644
--- a/substrate/frame/im-online/Cargo.toml
+++ b/substrate/frame/im-online/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/indices/Cargo.toml b/substrate/frame/indices/Cargo.toml
index f810ea36a70..7b14bf358f1 100644
--- a/substrate/frame/indices/Cargo.toml
+++ b/substrate/frame/indices/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/insecure-randomness-collective-flip/Cargo.toml b/substrate/frame/insecure-randomness-collective-flip/Cargo.toml
index f26bfa95bfd..f4d65d9e560 100644
--- a/substrate/frame/insecure-randomness-collective-flip/Cargo.toml
+++ b/substrate/frame/insecure-randomness-collective-flip/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
 safe-mix = { version = "1.0", default-features = false }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
 sp-runtime = { path = "../../primitives/runtime", default-features = false }
diff --git a/substrate/frame/lottery/Cargo.toml b/substrate/frame/lottery/Cargo.toml
index 3930ff32fc9..5f79704445f 100644
--- a/substrate/frame/lottery/Cargo.toml
+++ b/substrate/frame/lottery/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
 	"derive",
 ] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/membership/Cargo.toml b/substrate/frame/membership/Cargo.toml
index 64214670292..6f67db0ae70 100644
--- a/substrate/frame/membership/Cargo.toml
+++ b/substrate/frame/membership/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/merkle-mountain-range/Cargo.toml b/substrate/frame/merkle-mountain-range/Cargo.toml
index d623e25cec2..6dc919e1650 100644
--- a/substrate/frame/merkle-mountain-range/Cargo.toml
+++ b/substrate/frame/merkle-mountain-range/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
@@ -29,7 +29,7 @@ sp-std = { path = "../../primitives/std", default-features = false }
 
 [dev-dependencies]
 array-bytes = "6.1"
-env_logger = "0.9"
+env_logger = "0.11"
 itertools = "0.10.3"
 
 [features]
diff --git a/substrate/frame/message-queue/Cargo.toml b/substrate/frame/message-queue/Cargo.toml
index 8d9da7df39e..f263c41831b 100644
--- a/substrate/frame/message-queue/Cargo.toml
+++ b/substrate/frame/message-queue/Cargo.toml
@@ -13,7 +13,7 @@ workspace = true
 
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 serde = { optional = true, features = ["derive"], workspace = true, default-features = true }
 log = { workspace = true }
 environmental = { version = "1.1.4", default-features = false }
diff --git a/substrate/frame/migrations/Cargo.toml b/substrate/frame/migrations/Cargo.toml
index 0a91d2f94c4..4726ac5c521 100644
--- a/substrate/frame/migrations/Cargo.toml
+++ b/substrate/frame/migrations/Cargo.toml
@@ -15,7 +15,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features =
 docify = "0.2.8"
 impl-trait-for-tuples = "0.2.2"
 log = "0.4.21"
-scale-info = { version = "2.0.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 
 frame-benchmarking = { default-features = false, optional = true, path = "../benchmarking" }
 frame-support = { default-features = false, path = "../support" }
diff --git a/substrate/frame/mixnet/Cargo.toml b/substrate/frame/mixnet/Cargo.toml
index d1bb01dde1a..6a4ef5c29ac 100644
--- a/substrate/frame/mixnet/Cargo.toml
+++ b/substrate/frame/mixnet/Cargo.toml
@@ -21,7 +21,7 @@ frame-benchmarking = { default-features = false, optional = true, path = "../ben
 frame-support = { default-features = false, path = "../support" }
 frame-system = { default-features = false, path = "../system" }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 serde = { features = ["derive"], workspace = true }
 sp-application-crypto = { default-features = false, path = "../../primitives/application-crypto" }
 sp-arithmetic = { default-features = false, path = "../../primitives/arithmetic" }
diff --git a/substrate/frame/multisig/Cargo.toml b/substrate/frame/multisig/Cargo.toml
index 1d2a79bdc52..2437acbc2e2 100644
--- a/substrate/frame/multisig/Cargo.toml
+++ b/substrate/frame/multisig/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/nft-fractionalization/Cargo.toml b/substrate/frame/nft-fractionalization/Cargo.toml
index 8002b7e1165..b5a929468f7 100644
--- a/substrate/frame/nft-fractionalization/Cargo.toml
+++ b/substrate/frame/nft-fractionalization/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/nfts/Cargo.toml b/substrate/frame/nfts/Cargo.toml
index 69e9ea170b1..4f818ea3e08 100644
--- a/substrate/frame/nfts/Cargo.toml
+++ b/substrate/frame/nfts/Cargo.toml
@@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
 enumflags2 = { version = "0.7.7" }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/nis/Cargo.toml b/substrate/frame/nis/Cargo.toml
index 025daa07b0c..d0ba74a9273 100644
--- a/substrate/frame/nis/Cargo.toml
+++ b/substrate/frame/nis/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/node-authorization/Cargo.toml b/substrate/frame/node-authorization/Cargo.toml
index a39b0ec4eff..63376163cdc 100644
--- a/substrate/frame/node-authorization/Cargo.toml
+++ b/substrate/frame/node-authorization/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
 sp-core = { path = "../../primitives/core", default-features = false }
diff --git a/substrate/frame/nomination-pools/Cargo.toml b/substrate/frame/nomination-pools/Cargo.toml
index 9830f31d5fa..55e9ef6fbd3 100644
--- a/substrate/frame/nomination-pools/Cargo.toml
+++ b/substrate/frame/nomination-pools/Cargo.toml
@@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
 	"derive",
 ] }
-scale-info = { version = "2.10.0", default-features = false, features = [
+scale-info = { version = "2.11.1", default-features = false, features = [
 	"derive",
 ] }
 
diff --git a/substrate/frame/nomination-pools/benchmarking/Cargo.toml b/substrate/frame/nomination-pools/benchmarking/Cargo.toml
index 3693ad1866d..4985d7acbec 100644
--- a/substrate/frame/nomination-pools/benchmarking/Cargo.toml
+++ b/substrate/frame/nomination-pools/benchmarking/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 # parity
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 
 # FRAME
 frame-benchmarking = { path = "../../benchmarking", default-features = false }
diff --git a/substrate/frame/nomination-pools/test-staking/Cargo.toml b/substrate/frame/nomination-pools/test-staking/Cargo.toml
index 9c7b12e4c63..130a27752bf 100644
--- a/substrate/frame/nomination-pools/test-staking/Cargo.toml
+++ b/substrate/frame/nomination-pools/test-staking/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dev-dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] }
-scale-info = { version = "2.10.0", features = ["derive"] }
+scale-info = { version = "2.11.1", features = ["derive"] }
 
 sp-runtime = { path = "../../../primitives/runtime" }
 sp-io = { path = "../../../primitives/io" }
diff --git a/substrate/frame/offences/Cargo.toml b/substrate/frame/offences/Cargo.toml
index b3ef4671ce5..f8efc88bafc 100644
--- a/substrate/frame/offences/Cargo.toml
+++ b/substrate/frame/offences/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 serde = { optional = true, workspace = true, default-features = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/offences/benchmarking/Cargo.toml b/substrate/frame/offences/benchmarking/Cargo.toml
index 8dcce84d257..07905a1e0aa 100644
--- a/substrate/frame/offences/benchmarking/Cargo.toml
+++ b/substrate/frame/offences/benchmarking/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../../benchmarking", default-features = false }
 frame-election-provider-support = { path = "../../election-provider-support", default-features = false }
 frame-support = { path = "../../support", default-features = false }
diff --git a/substrate/frame/paged-list/Cargo.toml b/substrate/frame/paged-list/Cargo.toml
index bbe8e33d484..26f3d7e48ce 100644
--- a/substrate/frame/paged-list/Cargo.toml
+++ b/substrate/frame/paged-list/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
 docify = "0.2.8"
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
diff --git a/substrate/frame/parameters/Cargo.toml b/substrate/frame/parameters/Cargo.toml
index ab93be14e6c..2527bdf3a71 100644
--- a/substrate/frame/parameters/Cargo.toml
+++ b/substrate/frame/parameters/Cargo.toml
@@ -9,7 +9,7 @@ edition.workspace = true
 
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["max-encoded-len"] }
-scale-info = { version = "2.1.2", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 paste = { version = "1.0.14", default-features = false }
 serde = { features = ["derive"], optional = true, workspace = true, default-features = true }
 docify = "0.2.8"
diff --git a/substrate/frame/preimage/Cargo.toml b/substrate/frame/preimage/Cargo.toml
index 10a15f97bd5..d67fc7bead0 100644
--- a/substrate/frame/preimage/Cargo.toml
+++ b/substrate/frame/preimage/Cargo.toml
@@ -13,7 +13,7 @@ workspace = true
 
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/proxy/Cargo.toml b/substrate/frame/proxy/Cargo.toml
index 17930079afd..0a3b39e471d 100644
--- a/substrate/frame/proxy/Cargo.toml
+++ b/substrate/frame/proxy/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["max-encoded-len"] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/ranked-collective/Cargo.toml b/substrate/frame/ranked-collective/Cargo.toml
index 54e84c0b558..0a659580775 100644
--- a/substrate/frame/ranked-collective/Cargo.toml
+++ b/substrate/frame/ranked-collective/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/recovery/Cargo.toml b/substrate/frame/recovery/Cargo.toml
index 421c7995137..43608de37fc 100644
--- a/substrate/frame/recovery/Cargo.toml
+++ b/substrate/frame/recovery/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/referenda/Cargo.toml b/substrate/frame/referenda/Cargo.toml
index 2dfb25a2fd3..f4e0171443a 100644
--- a/substrate/frame/referenda/Cargo.toml
+++ b/substrate/frame/referenda/Cargo.toml
@@ -20,7 +20,7 @@ assert_matches = { version = "1.5", optional = true }
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
 	"derive",
 ] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 serde = { features = ["derive"], optional = true, workspace = true, default-features = true }
 sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
diff --git a/substrate/frame/remark/Cargo.toml b/substrate/frame/remark/Cargo.toml
index 45710f0539e..e746b0382ae 100644
--- a/substrate/frame/remark/Cargo.toml
+++ b/substrate/frame/remark/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 serde = { optional = true, workspace = true, default-features = true }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
diff --git a/substrate/frame/root-offences/Cargo.toml b/substrate/frame/root-offences/Cargo.toml
index 80b33095068..ad3dcf1f90e 100644
--- a/substrate/frame/root-offences/Cargo.toml
+++ b/substrate/frame/root-offences/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 
 pallet-session = { path = "../session", default-features = false, features = ["historical"] }
 pallet-staking = { path = "../staking", default-features = false }
diff --git a/substrate/frame/root-testing/Cargo.toml b/substrate/frame/root-testing/Cargo.toml
index 51b72665b81..bf14516ee32 100644
--- a/substrate/frame/root-testing/Cargo.toml
+++ b/substrate/frame/root-testing/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
 sp-core = { path = "../../primitives/core", default-features = false }
diff --git a/substrate/frame/safe-mode/Cargo.toml b/substrate/frame/safe-mode/Cargo.toml
index 6ddeff263c1..b6b7e5a67e4 100644
--- a/substrate/frame/safe-mode/Cargo.toml
+++ b/substrate/frame/safe-mode/Cargo.toml
@@ -20,7 +20,7 @@ docify = "0.2.8"
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false }
 sp-runtime = { path = "../../primitives/runtime", default-features = false }
 sp-std = { path = "../../primitives/std", default-features = false }
diff --git a/substrate/frame/salary/Cargo.toml b/substrate/frame/salary/Cargo.toml
index ba57fd46eeb..8c77edcb173 100644
--- a/substrate/frame/salary/Cargo.toml
+++ b/substrate/frame/salary/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/sassafras/Cargo.toml b/substrate/frame/sassafras/Cargo.toml
index 325a39bf597..09977142efc 100644
--- a/substrate/frame/sassafras/Cargo.toml
+++ b/substrate/frame/sassafras/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 scale-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
-scale-info = { version = "2.5.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/scheduler/Cargo.toml b/substrate/frame/scheduler/Cargo.toml
index a3e684a2083..40a71736447 100644
--- a/substrate/frame/scheduler/Cargo.toml
+++ b/substrate/frame/scheduler/Cargo.toml
@@ -15,7 +15,7 @@ workspace = true
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/scored-pool/Cargo.toml b/substrate/frame/scored-pool/Cargo.toml
index ae6ade5189d..92b70e01b9a 100644
--- a/substrate/frame/scored-pool/Cargo.toml
+++ b/substrate/frame/scored-pool/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
 sp-io = { path = "../../primitives/io", default-features = false }
diff --git a/substrate/frame/session/Cargo.toml b/substrate/frame/session/Cargo.toml
index de041307f70..86814f8276e 100644
--- a/substrate/frame/session/Cargo.toml
+++ b/substrate/frame/session/Cargo.toml
@@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
 impl-trait-for-tuples = "0.2.2"
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
 pallet-timestamp = { path = "../timestamp", default-features = false }
diff --git a/substrate/frame/session/benchmarking/Cargo.toml b/substrate/frame/session/benchmarking/Cargo.toml
index 291fda3c4c7..a00fbd8f6fd 100644
--- a/substrate/frame/session/benchmarking/Cargo.toml
+++ b/substrate/frame/session/benchmarking/Cargo.toml
@@ -29,7 +29,7 @@ sp-std = { path = "../../../primitives/std", default-features = false }
 
 [dev-dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] }
-scale-info = "2.10.0"
+scale-info = "2.11.1"
 frame-election-provider-support = { path = "../../election-provider-support" }
 pallet-balances = { path = "../../balances" }
 pallet-staking-reward-curve = { path = "../../staking/reward-curve" }
diff --git a/substrate/frame/society/Cargo.toml b/substrate/frame/society/Cargo.toml
index 3dab082b395..3d99ddba392 100644
--- a/substrate/frame/society/Cargo.toml
+++ b/substrate/frame/society/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 log = { workspace = true }
 rand_chacha = { version = "0.2", default-features = false }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
 
 sp-std = { path = "../../primitives/std", default-features = false }
diff --git a/substrate/frame/staking/Cargo.toml b/substrate/frame/staking/Cargo.toml
index 15c4bf9e290..4fd0cedbae6 100644
--- a/substrate/frame/staking/Cargo.toml
+++ b/substrate/frame/staking/Cargo.toml
@@ -20,7 +20,7 @@ serde = { features = ["alloc", "derive"], workspace = true }
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
 	"derive",
 ] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] }
 sp-io = { path = "../../primitives/io", default-features = false }
 sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] }
 sp-staking = { path = "../../primitives/staking", default-features = false, features = ["serde"] }
diff --git a/substrate/frame/state-trie-migration/Cargo.toml b/substrate/frame/state-trie-migration/Cargo.toml
index e837956613e..613308c308e 100644
--- a/substrate/frame/state-trie-migration/Cargo.toml
+++ b/substrate/frame/state-trie-migration/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version =
"2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, workspace = true, default-features = true } thousands = { version = "0.2.0", optional = true } zstd = { version = "0.12.4", default-features = false, optional = true } diff --git a/substrate/frame/statement/Cargo.toml b/substrate/frame/statement/Cargo.toml index 6827dbda962..92bc32191ab 100644 --- a/substrate/frame/statement/Cargo.toml +++ b/substrate/frame/statement/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } sp-statement-store = { path = "../../primitives/statement-store", default-features = false } diff --git a/substrate/frame/sudo/Cargo.toml b/substrate/frame/sudo/Cargo.toml index a60324847f1..805f46a77f2 100644 --- a/substrate/frame/sudo/Cargo.toml +++ b/substrate/frame/sudo/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } sp-io = { path = "../../primitives/io", default-features = false } diff --git a/substrate/frame/support/Cargo.toml b/substrate/frame/support/Cargo.toml index 3a61cfa6fac..ecdd9382632 100644 --- a/substrate/frame/support/Cargo.toml +++ b/substrate/frame/support/Cargo.toml @@ -22,7 +22,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", "max-encoded-len", ] } -scale-info = { version = "2.10.0", default-features = false, features = [ +scale-info = { version = "2.11.1", default-features = false, features = [ "derive", ] } frame-metadata = { version = "16.0.0", default-features = false, features = [ diff --git a/substrate/frame/support/test/Cargo.toml b/substrate/frame/support/test/Cargo.toml index 2f12cc00ed9..88124e0a43b 100644 --- a/substrate/frame/support/test/Cargo.toml +++ b/substrate/frame/support/test/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] static_assertions = "1.1.0" serde = { features = ["derive"], workspace = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-metadata = { version = "16.0.0", default-features = false, features = ["current"] } sp-api = { path = "../../../primitives/api", default-features = false } sp-arithmetic = { path = "../../../primitives/arithmetic", default-features = false } diff --git a/substrate/frame/support/test/compile_pass/Cargo.toml b/substrate/frame/support/test/compile_pass/Cargo.toml index 0617aa105a2..3f52b4664b1 100644 --- 
a/substrate/frame/support/test/compile_pass/Cargo.toml +++ b/substrate/frame/support/test/compile_pass/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } renamed-frame-support = { package = "frame-support", path = "../..", default-features = false } renamed-frame-system = { package = "frame-system", path = "../../../system", default-features = false } sp-core = { path = "../../../../primitives/core", default-features = false } diff --git a/substrate/frame/support/test/pallet/Cargo.toml b/substrate/frame/support/test/pallet/Cargo.toml index ca889faef87..7a20c3f2730 100644 --- a/substrate/frame/support/test/pallet/Cargo.toml +++ b/substrate/frame/support/test/pallet/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["derive"], workspace = true } frame-support = { path = "../..", default-features = false } frame-system = { path = "../../../system", default-features = false } diff --git a/substrate/frame/support/test/stg_frame_crate/Cargo.toml b/substrate/frame/support/test/stg_frame_crate/Cargo.toml index 632ea4e794f..295b2a1a524 100644 --- a/substrate/frame/support/test/stg_frame_crate/Cargo.toml +++ b/substrate/frame/support/test/stg_frame_crate/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } frame = { path = "../../..", default-features = false, features = ["experimental", "runtime"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } [features] default = ["std"] diff --git a/substrate/frame/system/Cargo.toml b/substrate/frame/system/Cargo.toml index d094c6bf984..16b3b946e22 100644 --- a/substrate/frame/system/Cargo.toml +++ b/substrate/frame/system/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] cfg-if = "1.0" codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } serde = { features = ["alloc", "derive"], workspace = true } frame-support = { path = "../support", default-features = false } sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } diff --git a/substrate/frame/system/benchmarking/Cargo.toml b/substrate/frame/system/benchmarking/Cargo.toml index 80fdff756c0..473a6bb132d 100644 --- a/substrate/frame/system/benchmarking/Cargo.toml +++ b/substrate/frame/system/benchmarking/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = 
{ version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../../benchmarking", default-features = false } frame-support = { path = "../../support", default-features = false } frame-system = { path = "..", default-features = false } diff --git a/substrate/frame/timestamp/Cargo.toml b/substrate/frame/timestamp/Cargo.toml index d8ba45a2ad2..da49b29c89b 100644 --- a/substrate/frame/timestamp/Cargo.toml +++ b/substrate/frame/timestamp/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/tips/Cargo.toml b/substrate/frame/tips/Cargo.toml index 7339cf0a8cc..a2acf0638ff 100644 --- a/substrate/frame/tips/Cargo.toml +++ b/substrate/frame/tips/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["derive"], optional = true, workspace = true, default-features = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/transaction-payment/Cargo.toml b/substrate/frame/transaction-payment/Cargo.toml index 275e1c01f92..24e5a714f0f 100644 --- a/substrate/frame/transaction-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, workspace = true, default-features = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml b/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml index 7640cc815e5..fef9afdee05 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml @@ -24,7 +24,7 @@ frame-system = { path = "../../system", default-features = false } pallet-asset-conversion = { path = "../../asset-conversion", default-features = false } pallet-transaction-payment = { path = "..", default-features = false } codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } 
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } [dev-dependencies] sp-core = { path = "../../../primitives/core", default-features = false } diff --git a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml index 1da3237df08..fc4f1aecc15 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml @@ -29,7 +29,7 @@ frame-benchmarking = { path = "../../benchmarking", default-features = false, op # Other dependencies codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, workspace = true, default-features = true } [dev-dependencies] diff --git a/substrate/frame/transaction-storage/Cargo.toml b/substrate/frame/transaction-storage/Cargo.toml index 1386d9b5a56..31741cf32d8 100644 --- a/substrate/frame/transaction-storage/Cargo.toml +++ b/substrate/frame/transaction-storage/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = { version = "6.1", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, workspace = true, default-features = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/treasury/Cargo.toml b/substrate/frame/treasury/Cargo.toml index 16bb4e92520..34037338a52 100644 --- a/substrate/frame/treasury/Cargo.toml +++ b/substrate/frame/treasury/Cargo.toml @@ -22,7 +22,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = ] } docify = "0.2.8" impl-trait-for-tuples = "0.2.2" -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["derive"], optional = true, workspace = true, default-features = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/tx-pause/Cargo.toml b/substrate/frame/tx-pause/Cargo.toml index a5916c048f4..5f028179037 100644 --- a/substrate/frame/tx-pause/Cargo.toml +++ b/substrate/frame/tx-pause/Cargo.toml @@ -20,7 +20,7 @@ docify = "0.2.8" frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-runtime = { path = "../../primitives/runtime", default-features = false } sp-std = { path = "../../primitives/std", default-features = false } pallet-balances = { path = 
"../balances", default-features = false, optional = true } diff --git a/substrate/frame/uniques/Cargo.toml b/substrate/frame/uniques/Cargo.toml index 4e5f21b3d8d..ee6af191d33 100644 --- a/substrate/frame/uniques/Cargo.toml +++ b/substrate/frame/uniques/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/utility/Cargo.toml b/substrate/frame/utility/Cargo.toml index ce5cd0fa61f..2ad575ed51f 100644 --- a/substrate/frame/utility/Cargo.toml +++ b/substrate/frame/utility/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/vesting/Cargo.toml b/substrate/frame/vesting/Cargo.toml index 96938b95a2a..e71731e3977 100644 --- a/substrate/frame/vesting/Cargo.toml +++ b/substrate/frame/vesting/Cargo.toml @@ -20,7 +20,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/whitelist/Cargo.toml b/substrate/frame/whitelist/Cargo.toml index 1a867f8075d..5c28fe29142 100644 --- a/substrate/frame/whitelist/Cargo.toml +++ b/substrate/frame/whitelist/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/primitives/api/Cargo.toml b/substrate/primitives/api/Cargo.toml index f4b1d13c520..544ba72141e 100644 --- a/substrate/primitives/api/Cargo.toml +++ b/substrate/primitives/api/Cargo.toml @@ -28,7 +28,7 @@ sp-state-machine = { path = "../state-machine", default-features = false, option sp-trie = { path = "../trie", default-features = false, optional = true } hash-db = { 
version = "0.16.0", optional = true } thiserror = { optional = true, workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = [ +scale-info = { version = "2.11.1", default-features = false, features = [ "derive", ] } sp-metadata-ir = { path = "../metadata-ir", default-features = false, optional = true } diff --git a/substrate/primitives/api/test/Cargo.toml b/substrate/primitives/api/test/Cargo.toml index 3a90553bbf0..52a4bd7bda3 100644 --- a/substrate/primitives/api/test/Cargo.toml +++ b/substrate/primitives/api/test/Cargo.toml @@ -26,11 +26,11 @@ codec = { package = "parity-scale-codec", version = "3.6.1" } sp-state-machine = { path = "../../state-machine" } trybuild = "1.0.88" rustversion = "1.0.6" -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } [dev-dependencies] criterion = "0.4.0" -futures = "0.3.21" +futures = "0.3.30" log = { workspace = true, default-features = true } sp-core = { path = "../../core" } static_assertions = "1.1.0" diff --git a/substrate/primitives/application-crypto/Cargo.toml b/substrate/primitives/application-crypto/Cargo.toml index 6f90a2b6262..20e2be4d155 100644 --- a/substrate/primitives/application-crypto/Cargo.toml +++ b/substrate/primitives/application-crypto/Cargo.toml @@ -20,7 +20,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sp-core = { path = "../core", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, features = ["alloc", "derive"], workspace = true } sp-std = { path = "../std", default-features = false } sp-io = { path = "../io", default-features = false } diff --git a/substrate/primitives/arithmetic/Cargo.toml b/substrate/primitives/arithmetic/Cargo.toml index 45f48d77a31..16eae43c73f 100644 --- a/substrate/primitives/arithmetic/Cargo.toml +++ b/substrate/primitives/arithmetic/Cargo.toml @@ -23,7 +23,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = ] } integer-sqrt = "0.1.2" num-traits = { version = "0.2.17", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], optional = true, workspace = true } static_assertions = "1.1.0" sp-std = { path = "../std", default-features = false } diff --git a/substrate/primitives/authority-discovery/Cargo.toml b/substrate/primitives/authority-discovery/Cargo.toml index 8ee8bb94ed9..88d93f40059 100644 --- a/substrate/primitives/authority-discovery/Cargo.toml +++ b/substrate/primitives/authority-discovery/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-api = { path = "../api", default-features = false } sp-application-crypto = { path = "../application-crypto", default-features = false } sp-runtime = { path = "../runtime", default-features = false } diff --git 
a/substrate/primitives/blockchain/Cargo.toml b/substrate/primitives/blockchain/Cargo.toml index 9d13d627eeb..e716b61bfeb 100644 --- a/substrate/primitives/blockchain/Cargo.toml +++ b/substrate/primitives/blockchain/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -futures = "0.3.21" +futures = "0.3.30" log = { workspace = true, default-features = true } parking_lot = "0.12.1" schnellru = "0.2.1" diff --git a/substrate/primitives/consensus/aura/Cargo.toml b/substrate/primitives/consensus/aura/Cargo.toml index 0cedc59ea8f..b689c84f158 100644 --- a/substrate/primitives/consensus/aura/Cargo.toml +++ b/substrate/primitives/consensus/aura/Cargo.toml @@ -16,9 +16,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.74", optional = true } +async-trait = { version = "0.1.79", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-api = { path = "../../api", default-features = false } sp-application-crypto = { path = "../../application-crypto", default-features = false } sp-consensus-slots = { path = "../slots", default-features = false } diff --git a/substrate/primitives/consensus/babe/Cargo.toml b/substrate/primitives/consensus/babe/Cargo.toml index 724b9fd3e28..2420f48b1f4 100644 --- a/substrate/primitives/consensus/babe/Cargo.toml +++ b/substrate/primitives/consensus/babe/Cargo.toml @@ -16,9 +16,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.74", optional = true } +async-trait = { version = "0.1.79", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-api = { path = "../../api", default-features = false } sp-application-crypto = { path = "../../application-crypto", default-features = false } diff --git a/substrate/primitives/consensus/beefy/Cargo.toml b/substrate/primitives/consensus/beefy/Cargo.toml index fbcc6e0c104..a16d943b914 100644 --- a/substrate/primitives/consensus/beefy/Cargo.toml +++ b/substrate/primitives/consensus/beefy/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, features = ["alloc", "derive"], workspace = true } sp-api = { path = "../../api", default-features = false } sp-application-crypto = { path = "../../application-crypto", default-features = false } @@ -26,7 +26,7 @@ sp-io = { path = "../../io", default-features = false } sp-mmr-primitives = { path = "../../merkle-mountain-range", default-features = false } sp-runtime = { path = "../../runtime", default-features = false } sp-keystore = { path = "../../keystore", default-features = false } -strum = { 
version = "0.24.1", features = ["derive"], default-features = false } +strum = { version = "0.26.2", features = ["derive"], default-features = false } lazy_static = { version = "1.4.0", optional = true } [dev-dependencies] diff --git a/substrate/primitives/consensus/common/Cargo.toml b/substrate/primitives/consensus/common/Cargo.toml index 048e31b0265..90aeadd5055 100644 --- a/substrate/primitives/consensus/common/Cargo.toml +++ b/substrate/primitives/consensus/common/Cargo.toml @@ -17,8 +17,8 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.74" -futures = { version = "0.3.21", features = ["thread-pool"] } +async-trait = "0.1.79" +futures = { version = "0.3.30", features = ["thread-pool"] } log = { workspace = true, default-features = true } thiserror = { workspace = true } sp-core = { path = "../../core" } @@ -27,7 +27,7 @@ sp-runtime = { path = "../../runtime" } sp-state-machine = { path = "../../state-machine" } [dev-dependencies] -futures = "0.3.21" +futures = "0.3.30" sp-test-primitives = { path = "../../test-primitives" } [features] diff --git a/substrate/primitives/consensus/grandpa/Cargo.toml b/substrate/primitives/consensus/grandpa/Cargo.toml index 1f2da55c5a1..6c228383d00 100644 --- a/substrate/primitives/consensus/grandpa/Cargo.toml +++ b/substrate/primitives/consensus/grandpa/Cargo.toml @@ -20,7 +20,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } grandpa = { package = "finality-grandpa", version = "0.16.2", default-features = false, features = ["derive-codec"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-api = { path = "../../api", default-features = false } sp-application-crypto = { path = "../../application-crypto", default-features = false } diff --git a/substrate/primitives/consensus/sassafras/Cargo.toml b/substrate/primitives/consensus/sassafras/Cargo.toml index 085709d4c8b..07304ed9b24 100644 --- a/substrate/primitives/consensus/sassafras/Cargo.toml +++ b/substrate/primitives/consensus/sassafras/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] scale-codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["derive"], optional = true, workspace = true } sp-api = { path = "../../api", default-features = false } sp-application-crypto = { path = "../../application-crypto", default-features = false, features = ["bandersnatch-experimental"] } diff --git a/substrate/primitives/consensus/slots/Cargo.toml b/substrate/primitives/consensus/slots/Cargo.toml index 94c02dba203..a8b12900617 100644 --- a/substrate/primitives/consensus/slots/Cargo.toml +++ b/substrate/primitives/consensus/slots/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, 
features = ["derive"] } serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-timestamp = { path = "../../timestamp", default-features = false } diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml index 908f2498de5..833b2af95cd 100644 --- a/substrate/primitives/core/Cargo.toml +++ b/substrate/primitives/core/Cargo.toml @@ -38,7 +38,7 @@ sp-std = { path = "../std", default-features = false } sp-debug-derive = { path = "../debug-derive", default-features = false } sp-storage = { path = "../storage", default-features = false } sp-externalities = { path = "../externalities", optional = true } -futures = { version = "0.3.21", optional = true } +futures = { version = "0.3.30", optional = true } dyn-clonable = { version = "0.9.0", optional = true } thiserror = { optional = true, workspace = true } tracing = { version = "0.1.29", optional = true } diff --git a/substrate/primitives/inherents/Cargo.toml b/substrate/primitives/inherents/Cargo.toml index 6463c423fe7..c08ac459de5 100644 --- a/substrate/primitives/inherents/Cargo.toml +++ b/substrate/primitives/inherents/Cargo.toml @@ -17,15 +17,15 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.74", optional = true } +async-trait = { version = "0.1.79", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" thiserror = { optional = true, workspace = true } sp-runtime = { path = "../runtime", default-features = false, optional = true } [dev-dependencies] -futures = "0.3.21" +futures = "0.3.30" [features] default = ["std"] diff --git a/substrate/primitives/keyring/Cargo.toml b/substrate/primitives/keyring/Cargo.toml index 940fe90916d..7471e9cf8ff 100644 --- a/substrate/primitives/keyring/Cargo.toml +++ b/substrate/primitives/keyring/Cargo.toml @@ -17,7 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -strum = { version = "0.24.1", features = ["derive"], default-features = false } +strum = { version = "0.26.2", features = ["derive"], default-features = false } sp-core = { path = "../core", default-features = false } sp-runtime = { path = "../runtime", default-features = false } diff --git a/substrate/primitives/merkle-mountain-range/Cargo.toml b/substrate/primitives/merkle-mountain-range/Cargo.toml index 9c07f699b37..891f893a0c9 100644 --- a/substrate/primitives/merkle-mountain-range/Cargo.toml +++ b/substrate/primitives/merkle-mountain-range/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } log = { workspace = true } mmr-lib = { package = "ckb-merkle-mountain-range", version = "0.5.2", default-features = false } serde = { features = ["alloc", "derive"], optional = true, workspace = true } diff --git a/substrate/primitives/metadata-ir/Cargo.toml b/substrate/primitives/metadata-ir/Cargo.toml index 31c839b5c48..ca8408d0ad9 100644 --- a/substrate/primitives/metadata-ir/Cargo.toml +++ b/substrate/primitives/metadata-ir/Cargo.toml @@ -18,7 +18,7 @@ 
targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } frame-metadata = { version = "16.0.0", default-features = false, features = ["current"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } [features] default = ["std"] diff --git a/substrate/primitives/mixnet/Cargo.toml b/substrate/primitives/mixnet/Cargo.toml index 8ba7f36da43..07840ca63cb 100644 --- a/substrate/primitives/mixnet/Cargo.toml +++ b/substrate/primitives/mixnet/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-api = { default-features = false, path = "../api" } sp-application-crypto = { default-features = false, path = "../application-crypto" } diff --git a/substrate/primitives/npos-elections/Cargo.toml b/substrate/primitives/npos-elections/Cargo.toml index b0b9890c061..afa59af64d6 100644 --- a/substrate/primitives/npos-elections/Cargo.toml +++ b/substrate/primitives/npos-elections/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-arithmetic = { path = "../arithmetic", default-features = false } sp-core = { path = "../core", default-features = false } diff --git a/substrate/primitives/runtime/Cargo.toml b/substrate/primitives/runtime/Cargo.toml index 3128ebce8f7..fb5fd60fbbf 100644 --- a/substrate/primitives/runtime/Cargo.toml +++ b/substrate/primitives/runtime/Cargo.toml @@ -24,7 +24,7 @@ impl-trait-for-tuples = "0.2.2" log = { workspace = true } paste = "1.0" rand = { version = "0.8.5", optional = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-application-crypto = { path = "../application-crypto", default-features = false } sp-arithmetic = { path = "../arithmetic", default-features = false } diff --git a/substrate/primitives/session/Cargo.toml b/substrate/primitives/session/Cargo.toml index 784228c42e9..cdee4fb03e1 100644 --- a/substrate/primitives/session/Cargo.toml +++ b/substrate/primitives/session/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-api = { path = "../api", default-features = false } sp-core = { path = "../core", default-features = false } sp-runtime = { path = "../runtime", optional = true } diff --git a/substrate/primitives/staking/Cargo.toml 
b/substrate/primitives/staking/Cargo.toml index 6304551b8e6..e380abb6a8c 100644 --- a/substrate/primitives/staking/Cargo.toml +++ b/substrate/primitives/staking/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { features = ["alloc", "derive"], optional = true, workspace = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" sp-core = { path = "../core", default-features = false } diff --git a/substrate/primitives/statement-store/Cargo.toml b/substrate/primitives/statement-store/Cargo.toml index 000fcd98704..b36bff69a00 100644 --- a/substrate/primitives/statement-store/Cargo.toml +++ b/substrate/primitives/statement-store/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-core = { path = "../core", default-features = false } sp-crypto-hashing = { path = "../crypto/hashing", default-features = false } sp-runtime = { path = "../runtime", default-features = false } diff --git a/substrate/primitives/test-primitives/Cargo.toml b/substrate/primitives/test-primitives/Cargo.toml index 1b51c5f5919..05135554315 100644 --- a/substrate/primitives/test-primitives/Cargo.toml +++ b/substrate/primitives/test-primitives/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["derive"], optional = true, workspace = true } sp-application-crypto = { path = "../application-crypto", default-features = false } sp-core = { path = "../core", default-features = false } diff --git a/substrate/primitives/timestamp/Cargo.toml b/substrate/primitives/timestamp/Cargo.toml index 11afb175590..5a1d4fcc985 100644 --- a/substrate/primitives/timestamp/Cargo.toml +++ b/substrate/primitives/timestamp/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.74", optional = true } +async-trait = { version = "0.1.79", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } thiserror = { optional = true, workspace = true } sp-inherents = { path = "../inherents", default-features = false } diff --git a/substrate/primitives/transaction-storage-proof/Cargo.toml b/substrate/primitives/transaction-storage-proof/Cargo.toml index fbd0a4752fc..137a232fce7 100644 --- a/substrate/primitives/transaction-storage-proof/Cargo.toml +++ b/substrate/primitives/transaction-storage-proof/Cargo.toml @@ -16,9 +16,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.74", optional = true } +async-trait = { version = "0.1.79", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", 
default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-core = { path = "../core", optional = true } sp-inherents = { path = "../inherents", default-features = false } sp-runtime = { path = "../runtime", default-features = false } diff --git a/substrate/primitives/trie/Cargo.toml b/substrate/primitives/trie/Cargo.toml index dd7ab080e5f..28c496d7a8e 100644 --- a/substrate/primitives/trie/Cargo.toml +++ b/substrate/primitives/trie/Cargo.toml @@ -29,7 +29,7 @@ memory-db = { version = "0.32.0", default-features = false } nohash-hasher = { version = "0.2.0", optional = true } parking_lot = { version = "0.12.1", optional = true } rand = { version = "0.8", optional = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } thiserror = { optional = true, workspace = true } tracing = { version = "0.1.29", optional = true } trie-db = { version = "0.28.0", default-features = false } diff --git a/substrate/primitives/version/Cargo.toml b/substrate/primitives/version/Cargo.toml index a94e2322430..d686b0c7551 100644 --- a/substrate/primitives/version/Cargo.toml +++ b/substrate/primitives/version/Cargo.toml @@ -20,7 +20,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } impl-serde = { version = "0.4.0", default-features = false, optional = true } parity-wasm = { version = "0.45", optional = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], optional = true, workspace = true } thiserror = { optional = true, workspace = true } sp-crypto-hashing-proc-macro = { path = "../crypto/hashing/proc-macro" } diff --git a/substrate/primitives/weights/Cargo.toml b/substrate/primitives/weights/Cargo.toml index d3118defb58..e73d4a702b4 100644 --- a/substrate/primitives/weights/Cargo.toml +++ b/substrate/primitives/weights/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] bounded-collections = { version = "0.2.0", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, features = ["alloc", "derive"], workspace = true } smallvec = "1.11.0" sp-arithmetic = { path = "../arithmetic", default-features = false } diff --git a/substrate/test-utils/Cargo.toml b/substrate/test-utils/Cargo.toml index af8b01cdef0..56b1c038199 100644 --- a/substrate/test-utils/Cargo.toml +++ b/substrate/test-utils/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -futures = "0.3.16" +futures = "0.3.30" tokio = { version = "1.22.0", features = ["macros", "time"] } [dev-dependencies] diff --git a/substrate/test-utils/client/Cargo.toml b/substrate/test-utils/client/Cargo.toml index 349b04d32d7..4e4e65314fc 100644 --- a/substrate/test-utils/client/Cargo.toml +++ b/substrate/test-utils/client/Cargo.toml @@ -17,9 +17,9 @@ targets = 
["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = "6.1" -async-trait = "0.1.74" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.6.1" } -futures = "0.3.21" +futures = "0.3.30" serde = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } sc-client-api = { path = "../../client/api" } diff --git a/substrate/test-utils/runtime/Cargo.toml b/substrate/test-utils/runtime/Cargo.toml index f49503da8ca..ffbd59f39ad 100644 --- a/substrate/test-utils/runtime/Cargo.toml +++ b/substrate/test-utils/runtime/Cargo.toml @@ -22,7 +22,7 @@ sp-consensus-babe = { path = "../../primitives/consensus/babe", default-features sp-genesis-builder = { path = "../../primitives/genesis-builder", default-features = false } sp-block-builder = { path = "../../primitives/block-builder", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-inherents = { path = "../../primitives/inherents", default-features = false } sp-keyring = { path = "../../primitives/keyring", default-features = false } sp-offchain = { path = "../../primitives/offchain", default-features = false } @@ -53,7 +53,7 @@ array-bytes = { version = "6.1", optional = true } log = { workspace = true } [dev-dependencies] -futures = "0.3.21" +futures = "0.3.30" sc-block-builder = { path = "../../client/block-builder" } sc-chain-spec = { path = "../../client/chain-spec" } sc-executor = { path = "../../client/executor" } diff --git a/substrate/test-utils/runtime/client/Cargo.toml b/substrate/test-utils/runtime/client/Cargo.toml index cbb964f6785..5ca24fea33e 100644 --- a/substrate/test-utils/runtime/client/Cargo.toml +++ b/substrate/test-utils/runtime/client/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -futures = "0.3.21" +futures = "0.3.30" sc-block-builder = { path = "../../../client/block-builder" } sc-client-api = { path = "../../../client/api" } sc-consensus = { path = "../../../client/consensus/common" } diff --git a/substrate/test-utils/runtime/transaction-pool/Cargo.toml b/substrate/test-utils/runtime/transaction-pool/Cargo.toml index 33e56e8e55e..9b52706c739 100644 --- a/substrate/test-utils/runtime/transaction-pool/Cargo.toml +++ b/substrate/test-utils/runtime/transaction-pool/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } -futures = "0.3.21" +futures = "0.3.30" parking_lot = "0.12.1" thiserror = { workspace = true } sc-transaction-pool = { path = "../../../client/transaction-pool" } diff --git a/substrate/utils/binary-merkle-tree/Cargo.toml b/substrate/utils/binary-merkle-tree/Cargo.toml index 6ba515afee1..a89006d94dc 100644 --- a/substrate/utils/binary-merkle-tree/Cargo.toml +++ b/substrate/utils/binary-merkle-tree/Cargo.toml @@ -18,7 +18,7 @@ hash-db = { version = "0.16.0", default-features = false } [dev-dependencies] array-bytes = "6.1" -env_logger = "0.9" +env_logger = "0.11" sp-core = { path = "../../primitives/core" } sp-runtime = { path = "../../primitives/runtime" } diff --git a/substrate/utils/frame/remote-externalities/Cargo.toml b/substrate/utils/frame/remote-externalities/Cargo.toml index 61e0a861ee0..82b01915483 100644 --- 
a/substrate/utils/frame/remote-externalities/Cargo.toml +++ b/substrate/utils/frame/remote-externalities/Cargo.toml @@ -26,7 +26,7 @@ sp-io = { path = "../../../primitives/io" } sp-runtime = { path = "../../../primitives/runtime" } tokio = { version = "1.22.0", features = ["macros", "rt-multi-thread"] } substrate-rpc-client = { path = "../rpc/client" } -futures = "0.3" +futures = "0.3.30" indicatif = "0.17.7" spinners = "4.1.0" tokio-retry = "0.3.0" diff --git a/substrate/utils/frame/rpc/client/Cargo.toml b/substrate/utils/frame/rpc/client/Cargo.toml index b51e3f44f4e..501bb95b257 100644 --- a/substrate/utils/frame/rpc/client/Cargo.toml +++ b/substrate/utils/frame/rpc/client/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] jsonrpsee = { version = "0.22", features = ["ws-client"] } sc-rpc-api = { path = "../../../../client/rpc-api" } -async-trait = "0.1.74" +async-trait = "0.1.79" serde = { workspace = true, default-features = true } sp-runtime = { path = "../../../../primitives/runtime" } log = { workspace = true, default-features = true } diff --git a/substrate/utils/frame/rpc/support/Cargo.toml b/substrate/utils/frame/rpc/support/Cargo.toml index 2e4bb6a1057..84db06da7b0 100644 --- a/substrate/utils/frame/rpc/support/Cargo.toml +++ b/substrate/utils/frame/rpc/support/Cargo.toml @@ -23,9 +23,9 @@ sc-rpc-api = { path = "../../../../client/rpc-api" } sp-storage = { path = "../../../../primitives/storage" } [dev-dependencies] -scale-info = "2.10.0" +scale-info = "2.11.1" jsonrpsee = { version = "0.22", features = ["jsonrpsee-types", "ws-client"] } -tokio = "1.22.0" +tokio = "1.37" sp-core = { path = "../../../../primitives/core" } sp-runtime = { path = "../../../../primitives/runtime" } frame-system = { path = "../../../../frame/system" } diff --git a/substrate/utils/frame/rpc/system/Cargo.toml b/substrate/utils/frame/rpc/system/Cargo.toml index f9a84a01af8..9e571bef9d0 100644 --- a/substrate/utils/frame/rpc/system/Cargo.toml +++ b/substrate/utils/frame/rpc/system/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] } -futures = "0.3.21" +futures = "0.3.30" log = { workspace = true, default-features = true } frame-system-rpc-runtime-api = { path = "../../../../frame/system/rpc/runtime-api" } sc-rpc-api = { path = "../../../../client/rpc-api" } @@ -31,7 +31,7 @@ sp-runtime = { path = "../../../../primitives/runtime" } [dev-dependencies] sc-transaction-pool = { path = "../../../../client/transaction-pool" } -tokio = "1.22.0" +tokio = "1.37" assert_matches = "1.3.0" sp-tracing = { path = "../../../../primitives/tracing" } substrate-test-runtime-client = { path = "../../../../test-utils/runtime/client" } diff --git a/substrate/utils/frame/try-runtime/cli/Cargo.toml b/substrate/utils/frame/try-runtime/cli/Cargo.toml index 3ff57745a2d..618cb645475 100644 --- a/substrate/utils/frame/try-runtime/cli/Cargo.toml +++ b/substrate/utils/frame/try-runtime/cli/Cargo.toml @@ -37,7 +37,7 @@ sp-weights = { path = "../../../../primitives/weights" } frame-try-runtime = { path = "../../../../frame/try-runtime", optional = true } substrate-rpc-client = { path = "../../rpc/client" } -async-trait = "0.1.74" +async-trait = "0.1.79" clap = { version = "4.5.3", features = ["derive"] } hex = { version = "0.4.3", default-features = false } log = { workspace = true, default-features = true } @@ -52,7 +52,7 @@ 
node-primitives = { path = "../../../../bin/node/primitives" } regex = "1.7.3" substrate-cli-test-utils = { path = "../../../../test-utils/cli" } tempfile = "3.1.0" -tokio = "1.27.0" +tokio = "1.37" [features] try-runtime = [ diff --git a/substrate/utils/wasm-builder/Cargo.toml b/substrate/utils/wasm-builder/Cargo.toml index 7abd1a20284..bac323e2e6a 100644 --- a/substrate/utils/wasm-builder/Cargo.toml +++ b/substrate/utils/wasm-builder/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] build-helper = "0.1.1" cargo_metadata = "0.15.4" console = "0.15.8" -strum = { version = "0.24.1", features = ["derive"] } +strum = { version = "0.26.2", features = ["derive"] } tempfile = "3.1.0" toml = "0.8.8" walkdir = "2.4.0" diff --git a/templates/minimal/node/Cargo.toml b/templates/minimal/node/Cargo.toml index 41d9708ea60..0668304e502 100644 --- a/templates/minimal/node/Cargo.toml +++ b/templates/minimal/node/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] clap = { version = "4.5.3", features = ["derive"] } -futures = { version = "0.3.21", features = ["thread-pool"] } +futures = { version = "0.3.30", features = ["thread-pool"] } futures-timer = "3.0.1" jsonrpsee = { version = "0.22", features = ["server"] } serde_json = { workspace = true, default-features = true } diff --git a/templates/minimal/pallets/template/Cargo.toml b/templates/minimal/pallets/template/Cargo.toml index 9982e5ea53b..a85391f2942 100644 --- a/templates/minimal/pallets/template/Cargo.toml +++ b/templates/minimal/pallets/template/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive", ], default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = [ +scale-info = { version = "2.11.1", default-features = false, features = [ "derive", ] } frame = { path = "../../../../substrate/frame", default-features = false, features = [ diff --git a/templates/parachain/pallets/template/Cargo.toml b/templates/parachain/pallets/template/Cargo.toml index 89eb9d51716..199da2f12d2 100644 --- a/templates/parachain/pallets/template/Cargo.toml +++ b/templates/parachain/pallets/template/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.10.0", default-features = false, features = [ +scale-info = { version = "2.11.1", default-features = false, features = [ "derive", ] } diff --git a/templates/parachain/runtime/Cargo.toml b/templates/parachain/runtime/Cargo.toml index 41a510c5ed3..0d985796a11 100644 --- a/templates/parachain/runtime/Cargo.toml +++ b/templates/parachain/runtime/Cargo.toml @@ -24,7 +24,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = ] } hex-literal = { version = "0.4.1", optional = true } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = [ +scale-info = { version = "2.11.1", default-features = false, features = [ "derive", ] } smallvec = "1.11.0" diff --git a/templates/solochain/node/Cargo.toml b/templates/solochain/node/Cargo.toml index a0bb5c27ed3..1ddd1cb3a7b 100644 --- a/templates/solochain/node/Cargo.toml +++ b/templates/solochain/node/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] clap = { version = "4.5.3", features = ["derive"] } -futures = { version = "0.3.21", features = 
["thread-pool"] } +futures = { version = "0.3.30", features = ["thread-pool"] } serde_json = { workspace = true, default-features = true } jsonrpsee = { version = "0.22", features = ["server"] } diff --git a/templates/solochain/pallets/template/Cargo.toml b/templates/solochain/pallets/template/Cargo.toml index bd234715198..24519f1d22e 100644 --- a/templates/solochain/pallets/template/Cargo.toml +++ b/templates/solochain/pallets/template/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.10.0", default-features = false, features = [ +scale-info = { version = "2.11.1", default-features = false, features = [ "derive", ] } diff --git a/templates/solochain/runtime/Cargo.toml b/templates/solochain/runtime/Cargo.toml index 90dd823eb64..7a81f192043 100644 --- a/templates/solochain/runtime/Cargo.toml +++ b/templates/solochain/runtime/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.10.0", default-features = false, features = [ +scale-info = { version = "2.11.1", default-features = false, features = [ "derive", "serde", ] } -- GitLab From e54279699b646b0be4b48e898f0b7e3d5fe1b033 Mon Sep 17 00:00:00 2001 From: Dastan <88332432+dastansam@users.noreply.github.com> Date: Tue, 2 Apr 2024 15:43:09 +0200 Subject: [PATCH 084/128] migrations: prevent accidentally using unversioned migrations instead of `VersionedMigration` (#3835) closes #1324 #### Problem Currently, it is possible to accidentally use inner unversioned migration instead of `VersionedMigration` since both implement `OnRuntimeUpgrade`. #### Solution With this change, we make it clear that value of `Inner` is not intended to be used directly. It is achieved by bounding `Inner` to new trait `UncheckedOnRuntimeUpgrade`, which has the same interface (except `unchecked_` prefix) as `OnRuntimeUpgrade`. #### `try-runtime` functions Since developers can implement `try-runtime` for `Inner` value in `VersionedMigration` and have custom logic for it, I added the same `try-runtime` functions to `UncheckedOnRuntimeUpgrade`. I looked for a ways to not duplicate functions, but couldn't find anything that doesn't significantly change the codebase. 
So I would appreciate it if you have any suggestions to improve this cc @liamaharon @xlc polkadot address: 16FqwPZ8GRC5U5D4Fu7W33nA55ZXzXGWHwmbnE1eT6pxuqcT --------- Co-authored-by: Liam Aharon --- cumulus/pallets/xcmp-queue/src/migration.rs | 9 +- .../common/src/assigned_slots/migration.rs | 5 +- .../common/src/paras_registrar/migration.rs | 4 +- .../src/assigner_on_demand/migration.rs | 8 +- .../src/configuration/migration/v10.rs | 10 +- .../src/configuration/migration/v11.rs | 8 +- .../src/configuration/migration/v12.rs | 4 +- .../parachains/src/inclusion/migration.rs | 10 +- .../parachains/src/scheduler/migration.rs | 9 +- polkadot/xcm/pallet-xcm/src/migration.rs | 4 +- prdoc/pr_3835.prdoc | 54 +++++ .../single-block-migrations/src/lib.rs | 24 +-- .../src/migrations/v1.rs | 196 ++++++++---------- substrate/frame/grandpa/src/migrations/v5.rs | 15 +- substrate/frame/identity/src/migration.rs | 6 +- .../frame/nomination-pools/src/migration.rs | 8 +- substrate/frame/society/src/migrations.rs | 4 +- substrate/frame/support/src/migrations.rs | 22 +- substrate/frame/support/src/traits.rs | 2 +- substrate/frame/support/src/traits/hooks.rs | 28 ++- .../support/test/tests/versioned_migration.rs | 8 +- substrate/frame/uniques/src/migration.rs | 8 +- 22 files changed, 257 insertions(+), 189 deletions(-) create mode 100644 prdoc/pr_3835.prdoc diff --git a/cumulus/pallets/xcmp-queue/src/migration.rs b/cumulus/pallets/xcmp-queue/src/migration.rs index c7fa61a3e3f..1702cd70bc2 100644 --- a/cumulus/pallets/xcmp-queue/src/migration.rs +++ b/cumulus/pallets/xcmp-queue/src/migration.rs @@ -20,7 +20,7 @@ use crate::{Config, OverweightIndex, Pallet, QueueConfig, QueueConfigData, DEFAU use cumulus_primitives_core::XcmpMessageFormat; use frame_support::{ pallet_prelude::*, - traits::{EnqueueMessage, OnRuntimeUpgrade, StorageVersion}, + traits::{EnqueueMessage, StorageVersion, UncheckedOnRuntimeUpgrade}, weights::{constants::WEIGHT_REF_TIME_PER_MILLIS, Weight}, }; @@ -96,7 +96,7 @@ pub mod v2 { /// 2D weights). pub struct UncheckedMigrationToV2(PhantomData); - impl OnRuntimeUpgrade for UncheckedMigrationToV2 { + impl UncheckedOnRuntimeUpgrade for UncheckedMigrationToV2 { #[allow(deprecated)] fn on_runtime_upgrade() -> Weight { let translate = |pre: v1::QueueConfigData| -> v2::QueueConfigData { @@ -187,7 +187,7 @@ pub mod v3 { /// Migrates the pallet storage to v3. pub struct UncheckedMigrationToV3(PhantomData); - impl OnRuntimeUpgrade for UncheckedMigrationToV3 { + impl UncheckedOnRuntimeUpgrade for UncheckedMigrationToV3 { fn on_runtime_upgrade() -> Weight { #[frame_support::storage_alias] type Overweight = @@ -266,7 +266,7 @@ pub mod v4 { /// thresholds to at least the default values. pub struct UncheckedMigrationToV4(PhantomData); - impl OnRuntimeUpgrade for UncheckedMigrationToV4 { + impl UncheckedOnRuntimeUpgrade for UncheckedMigrationToV4 { fn on_runtime_upgrade() -> Weight { let translate = |pre: v2::QueueConfigData| -> QueueConfigData { let pre_default = v2::QueueConfigData::default(); @@ -315,6 +315,7 @@ pub mod v4 { mod tests { use super::*; use crate::mock::{new_test_ext, Test}; + use frame_support::traits::OnRuntimeUpgrade; #[test] #[allow(deprecated)] diff --git a/polkadot/runtime/common/src/assigned_slots/migration.rs b/polkadot/runtime/common/src/assigned_slots/migration.rs index def6bad692a..7e582dfa596 100644 --- a/polkadot/runtime/common/src/assigned_slots/migration.rs +++ b/polkadot/runtime/common/src/assigned_slots/migration.rs @@ -15,7 +15,7 @@ // along with Polkadot. If not, see .
use super::{Config, MaxPermanentSlots, MaxTemporarySlots, Pallet, LOG_TARGET}; -use frame_support::traits::{Get, GetStorageVersion, OnRuntimeUpgrade}; +use frame_support::traits::{Get, GetStorageVersion, UncheckedOnRuntimeUpgrade}; #[cfg(feature = "try-runtime")] use frame_support::ensure; @@ -23,10 +23,9 @@ use frame_support::ensure; use sp_std::vec::Vec; pub mod v1 { - use super::*; pub struct VersionUncheckedMigrateToV1(sp_std::marker::PhantomData); - impl OnRuntimeUpgrade for VersionUncheckedMigrateToV1 { + impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateToV1 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { let on_chain_version = Pallet::::on_chain_storage_version(); diff --git a/polkadot/runtime/common/src/paras_registrar/migration.rs b/polkadot/runtime/common/src/paras_registrar/migration.rs index f977674a1e4..18bb6bbfb55 100644 --- a/polkadot/runtime/common/src/paras_registrar/migration.rs +++ b/polkadot/runtime/common/src/paras_registrar/migration.rs @@ -15,7 +15,7 @@ // along with Polkadot. If not, see . use super::*; -use frame_support::traits::{Contains, OnRuntimeUpgrade}; +use frame_support::traits::{Contains, UncheckedOnRuntimeUpgrade}; #[derive(Encode, Decode)] pub struct ParaInfoV1 { @@ -27,7 +27,7 @@ pub struct ParaInfoV1 { pub struct VersionUncheckedMigrateToV1( sp_std::marker::PhantomData<(T, UnlockParaIds)>, ); -impl> OnRuntimeUpgrade +impl> UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateToV1 { fn on_runtime_upgrade() -> Weight { diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs b/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs index 5071653377d..8589ddc292b 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs @@ -18,7 +18,7 @@ use super::*; use frame_support::{ migrations::VersionedMigration, pallet_prelude::ValueQuery, storage_alias, - traits::OnRuntimeUpgrade, weights::Weight, + traits::UncheckedOnRuntimeUpgrade, weights::Weight, }; mod v0 { @@ -51,7 +51,7 @@ mod v1 { /// Migration to V1 pub struct UncheckedMigrateToV1(sp_std::marker::PhantomData); - impl OnRuntimeUpgrade for UncheckedMigrateToV1 { + impl UncheckedOnRuntimeUpgrade for UncheckedMigrateToV1 { fn on_runtime_upgrade() -> Weight { let mut weight: Weight = Weight::zero(); @@ -141,7 +141,7 @@ pub type MigrateV0ToV1 = VersionedMigration< #[cfg(test)] mod tests { - use super::{v0, v1, OnRuntimeUpgrade, Weight}; + use super::{v0, v1, UncheckedOnRuntimeUpgrade, Weight}; use crate::mock::{new_test_ext, MockGenesisConfig, OnDemandAssigner, Test}; use primitives::Id as ParaId; @@ -163,7 +163,7 @@ mod tests { // For tests, db weight is zero. assert_eq!( - as OnRuntimeUpgrade>::on_runtime_upgrade(), + as UncheckedOnRuntimeUpgrade>::on_runtime_upgrade(), Weight::zero() ); diff --git a/polkadot/runtime/parachains/src/configuration/migration/v10.rs b/polkadot/runtime/parachains/src/configuration/migration/v10.rs index 3c8d6084ace..fa72c357d7d 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v10.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v10.rs @@ -17,7 +17,11 @@ //! A module that is responsible for migration of storage. 
use crate::configuration::{Config, Pallet}; -use frame_support::{pallet_prelude::*, traits::Defensive, weights::Weight}; +use frame_support::{ + pallet_prelude::*, + traits::{Defensive, UncheckedOnRuntimeUpgrade}, + weights::Weight, +}; use frame_system::pallet_prelude::BlockNumberFor; use primitives::{ AsyncBackingParams, Balance, ExecutorParams, NodeFeatures, SessionIndex, @@ -26,8 +30,6 @@ use primitives::{ use sp_runtime::Perbill; use sp_std::vec::Vec; -use frame_support::traits::OnRuntimeUpgrade; - use super::v9::V9HostConfiguration; // All configuration of the runtime with respect to paras. #[derive(Clone, Encode, PartialEq, Decode, Debug)] @@ -163,7 +165,7 @@ mod v10 { } pub struct VersionUncheckedMigrateToV10(sp_std::marker::PhantomData); -impl OnRuntimeUpgrade for VersionUncheckedMigrateToV10 { +impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateToV10 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { log::trace!(target: crate::configuration::LOG_TARGET, "Running pre_upgrade() for HostConfiguration MigrateToV10"); diff --git a/polkadot/runtime/parachains/src/configuration/migration/v11.rs b/polkadot/runtime/parachains/src/configuration/migration/v11.rs index 7ed9d086885..65656e8d7c0 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v11.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v11.rs @@ -18,7 +18,10 @@ use crate::configuration::{self, Config, Pallet}; use frame_support::{ - migrations::VersionedMigration, pallet_prelude::*, traits::Defensive, weights::Weight, + migrations::VersionedMigration, + pallet_prelude::*, + traits::{Defensive, UncheckedOnRuntimeUpgrade}, + weights::Weight, }; use frame_system::pallet_prelude::BlockNumberFor; use primitives::{ @@ -27,7 +30,6 @@ use primitives::{ }; use sp_std::vec::Vec; -use frame_support::traits::OnRuntimeUpgrade; use polkadot_core_primitives::Balance; use sp_arithmetic::Perbill; @@ -176,7 +178,7 @@ pub type MigrateToV11 = VersionedMigration< >; pub struct UncheckedMigrateToV11(sp_std::marker::PhantomData); -impl OnRuntimeUpgrade for UncheckedMigrateToV11 { +impl UncheckedOnRuntimeUpgrade for UncheckedMigrateToV11 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { log::trace!(target: crate::configuration::LOG_TARGET, "Running pre_upgrade() for HostConfiguration MigrateToV11"); diff --git a/polkadot/runtime/parachains/src/configuration/migration/v12.rs b/polkadot/runtime/parachains/src/configuration/migration/v12.rs index 4295a79893e..69bacc83d04 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v12.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v12.rs @@ -20,7 +20,7 @@ use crate::configuration::{self, migration::v11::V11HostConfiguration, Config, P use frame_support::{ migrations::VersionedMigration, pallet_prelude::*, - traits::{Defensive, OnRuntimeUpgrade}, + traits::{Defensive, UncheckedOnRuntimeUpgrade}, }; use frame_system::pallet_prelude::BlockNumberFor; use primitives::vstaging::SchedulerParams; @@ -70,7 +70,7 @@ pub type MigrateToV12 = VersionedMigration< pub struct UncheckedMigrateToV12(sp_std::marker::PhantomData); -impl OnRuntimeUpgrade for UncheckedMigrateToV12 { +impl UncheckedOnRuntimeUpgrade for UncheckedMigrateToV12 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { log::trace!(target: crate::configuration::LOG_TARGET, "Running pre_upgrade() for HostConfiguration MigrateToV12"); diff --git 
a/polkadot/runtime/parachains/src/inclusion/migration.rs b/polkadot/runtime/parachains/src/inclusion/migration.rs index 1e63b209f4e..5f35680ee69 100644 --- a/polkadot/runtime/parachains/src/inclusion/migration.rs +++ b/polkadot/runtime/parachains/src/inclusion/migration.rs @@ -73,7 +73,7 @@ mod v1 { CandidatePendingAvailability as V1CandidatePendingAvailability, Config, Pallet, PendingAvailability as V1PendingAvailability, }; - use frame_support::{traits::OnRuntimeUpgrade, weights::Weight}; + use frame_support::{traits::UncheckedOnRuntimeUpgrade, weights::Weight}; use sp_core::Get; use sp_std::{collections::vec_deque::VecDeque, vec::Vec}; @@ -87,7 +87,7 @@ mod v1 { pub struct VersionUncheckedMigrateToV1(sp_std::marker::PhantomData); - impl OnRuntimeUpgrade for VersionUncheckedMigrateToV1 { + impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateToV1 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { log::trace!(target: crate::inclusion::LOG_TARGET, "Running pre_upgrade() for inclusion MigrateToV1"); @@ -216,7 +216,7 @@ mod tests { }, mock::{new_test_ext, MockGenesisConfig, Test}, }; - use frame_support::traits::OnRuntimeUpgrade; + use frame_support::traits::UncheckedOnRuntimeUpgrade; use primitives::{AvailabilityBitfield, Id as ParaId}; use test_helpers::{dummy_candidate_commitments, dummy_candidate_descriptor, dummy_hash}; @@ -225,7 +225,7 @@ mod tests { new_test_ext(MockGenesisConfig::default()).execute_with(|| { // No data to migrate. assert_eq!( - as OnRuntimeUpgrade>::on_runtime_upgrade(), + as UncheckedOnRuntimeUpgrade>::on_runtime_upgrade(), Weight::zero() ); assert!(V1PendingAvailability::::iter().next().is_none()); @@ -299,7 +299,7 @@ mod tests { // For tests, db weight is zero. assert_eq!( - as OnRuntimeUpgrade>::on_runtime_upgrade(), + as UncheckedOnRuntimeUpgrade>::on_runtime_upgrade(), Weight::zero() ); diff --git a/polkadot/runtime/parachains/src/scheduler/migration.rs b/polkadot/runtime/parachains/src/scheduler/migration.rs index c47fbab046f..b030940fb41 100644 --- a/polkadot/runtime/parachains/src/scheduler/migration.rs +++ b/polkadot/runtime/parachains/src/scheduler/migration.rs @@ -19,7 +19,7 @@ use super::*; use frame_support::{ migrations::VersionedMigration, pallet_prelude::ValueQuery, storage_alias, - traits::OnRuntimeUpgrade, weights::Weight, + traits::UncheckedOnRuntimeUpgrade, weights::Weight, }; /// Old/legacy assignment representation (v0). @@ -105,7 +105,8 @@ mod v0 { // - Assignments only consist of `ParaId`, `Assignment` is a concrete type (Same as V0Assignment). 
mod v1 { use frame_support::{ - pallet_prelude::ValueQuery, storage_alias, traits::OnRuntimeUpgrade, weights::Weight, + pallet_prelude::ValueQuery, storage_alias, traits::UncheckedOnRuntimeUpgrade, + weights::Weight, }; use frame_system::pallet_prelude::BlockNumberFor; @@ -164,7 +165,7 @@ mod v1 { /// Migration to V1 pub struct UncheckedMigrateToV1(sp_std::marker::PhantomData); - impl OnRuntimeUpgrade for UncheckedMigrateToV1 { + impl UncheckedOnRuntimeUpgrade for UncheckedMigrateToV1 { fn on_runtime_upgrade() -> Weight { let mut weight: Weight = Weight::zero(); @@ -302,7 +303,7 @@ mod v2 { /// Migration to V2 pub struct UncheckedMigrateToV2(sp_std::marker::PhantomData); - impl OnRuntimeUpgrade for UncheckedMigrateToV2 { + impl UncheckedOnRuntimeUpgrade for UncheckedMigrateToV2 { fn on_runtime_upgrade() -> Weight { let mut weight: Weight = Weight::zero(); diff --git a/polkadot/xcm/pallet-xcm/src/migration.rs b/polkadot/xcm/pallet-xcm/src/migration.rs index 018436aa3c9..b157e6b5c3d 100644 --- a/polkadot/xcm/pallet-xcm/src/migration.rs +++ b/polkadot/xcm/pallet-xcm/src/migration.rs @@ -19,7 +19,7 @@ use crate::{ }; use frame_support::{ pallet_prelude::*, - traits::{OnRuntimeUpgrade, StorageVersion}, + traits::{OnRuntimeUpgrade, StorageVersion, UncheckedOnRuntimeUpgrade}, weights::Weight, }; @@ -35,7 +35,7 @@ pub mod v1 { /// /// Use experimental [`MigrateToV1`] instead. pub struct VersionUncheckedMigrateToV1(sp_std::marker::PhantomData); - impl OnRuntimeUpgrade for VersionUncheckedMigrateToV1 { + impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateToV1 { fn on_runtime_upgrade() -> Weight { let mut weight = T::DbWeight::get().reads(1); diff --git a/prdoc/pr_3835.prdoc b/prdoc/pr_3835.prdoc new file mode 100644 index 00000000000..d2f49f8fc11 --- /dev/null +++ b/prdoc/pr_3835.prdoc @@ -0,0 +1,54 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "migrations: prevent accidentally using the inner unversioned migration instead of `VersionedMigration`" + +doc: + - audience: Runtime Dev + description: | + Currently, it is possible to accidentally use the inner unversioned migration instead of `VersionedMigration` + since both implement `OnRuntimeUpgrade`. With this change, we make it clear that `Inner` is not intended + to be used directly. It is achieved by bounding `Inner` to the new trait `UncheckedOnRuntimeUpgrade`, which + has the same interface as `OnRuntimeUpgrade`, but cannot be used directly for runtime upgrade migrations. + + This change will break all existing migrations passed to `VersionedMigration`. Developers should simply change + those migrations to implement `UncheckedOnRuntimeUpgrade` instead of `OnRuntimeUpgrade`. + + Example: + + ``` + --- a/path/to/migration.rs + +++ b/path/to/migration.rs + @@ -1,7 +1,7 @@ + -impl OnRuntimeUpgrade for MigrateVNToVM { + +impl UncheckedOnRuntimeUpgrade for MigrateVNToVM { + fn on_runtime_upgrade() -> Weight { + // Migration logic here + // Adjust the migration logic if necessary to align with the expectations + // of the new `UncheckedOnRuntimeUpgrade` trait.
+ 0 + } + } + ``` + +crates: + - name: "pallet-example-single-block-migrations" + bump: "major" + - name: "pallet-xcm" + bump: "major" + - name: "pallet-grandpa" + bump: "major" + - name: "pallet-identity" + bump: "major" + - name: "pallet-nomination-pools" + bump: "major" + - name: "pallet-society" + bump: "major" + - name: "frame-support" + bump: "major" + - name: "pallet-uniques" + bump: "major" + - name: "polkadot-runtime-parachains" + bump: "major" + - name: "polkadot-runtime-common" + bump: "major" diff --git a/substrate/frame/examples/single-block-migrations/src/lib.rs b/substrate/frame/examples/single-block-migrations/src/lib.rs index b36d5262267..411537aa8c6 100644 --- a/substrate/frame/examples/single-block-migrations/src/lib.rs +++ b/substrate/frame/examples/single-block-migrations/src/lib.rs @@ -89,7 +89,7 @@ //! //! See the migration source code for detailed comments. //! -//! To keep the migration logic organised, it is split across additional modules: +//! Here's a brief overview of modules and types defined in `v1.rs`: //! //! ### `mod v0` //! @@ -98,28 +98,29 @@ //! //! This allows reading the old v0 value from storage during the migration. //! -//! ### `mod version_unchecked` +//! ### `InnerMigrateV0ToV1` //! //! Here we define our raw migration logic, -//! `version_unchecked::MigrateV0ToV1` which implements the [`OnRuntimeUpgrade`] trait. +//! `InnerMigrateV0ToV1` which implements the [`UncheckedOnRuntimeUpgrade`] trait. //! -//! Importantly, it is kept in a private module so that it cannot be accidentally used in a runtime. +//! #### Why [`UncheckedOnRuntimeUpgrade`]? //! -//! Private modules cannot be referenced in docs, so please read the code directly. +//! Otherwise, we would have two implementations of [`OnRuntimeUpgrade`] which could be confusing, +//! and may lead to accidentally using the wrong one. //! //! #### Standalone Struct or Pallet Hook? //! //! Note that the storage migration logic is attached to a standalone struct implementing -//! [`OnRuntimeUpgrade`], rather than implementing the +//! [`UncheckedOnRuntimeUpgrade`], rather than implementing the //! [`Hooks::on_runtime_upgrade`](frame_support::traits::Hooks::on_runtime_upgrade) hook directly on //! the pallet. The pallet hook is better suited for special types of logic that need to execute on //! every runtime upgrade, but not so much for one-off storage migrations. //! -//! ### `pub mod versioned` +//! ### `MigrateV0ToV1` //! -//! Here, `version_unchecked::MigrateV0ToV1` is wrapped in a +//! Here, `InnerMigrateV0ToV1` is wrapped in a //! [`VersionedMigration`] to define -//! [`versioned::MigrateV0ToV1`](crate::migrations::v1::versioned::MigrateV0ToV1), which may be used +//! [`MigrateV0ToV1`](crate::migrations::v1::MigrateV0ToV1), which may be used //! in runtimes. //! //! Using [`VersionedMigration`] ensures that @@ -128,8 +129,6 @@ //! - Reads and writes from checking and setting the on-chain storage version are accounted for in //! the final [`Weight`](frame_support::weights::Weight) //! -//! This is the only public module exported from `v1`. -//! //! ### `mod test` //! //! Here basic unit tests are defined for the migration. @@ -142,7 +141,8 @@ //! [`VersionedMigration`]: frame_support::migrations::VersionedMigration //! [`GetStorageVersion`]: frame_support::traits::GetStorageVersion //! [`OnRuntimeUpgrade`]: frame_support::traits::OnRuntimeUpgrade -//! [`MigrateV0ToV1`]: crate::migrations::v1::versioned::MigrationV0ToV1 +//! 
[`UncheckedOnRuntimeUpgrade`]: frame_support::traits::UncheckedOnRuntimeUpgrade +//! [`MigrateV0ToV1`]: crate::migrations::v1::MigrateV0ToV1 // We make sure this pallet uses `no_std` for compiling to Wasm. #![cfg_attr(not(feature = "std"), no_std)] diff --git a/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs b/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs index b46640a3202..18ef4e72cc4 100644 --- a/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs +++ b/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs @@ -17,7 +17,7 @@ use frame_support::{ storage_alias, - traits::{Get, OnRuntimeUpgrade}, + traits::{Get, UncheckedOnRuntimeUpgrade}, }; #[cfg(feature = "try-runtime")] @@ -34,118 +34,92 @@ mod v0 { pub type Value = StorageValue, u32>; } -/// Private module containing *version unchecked* migration logic. +/// Implements [`UncheckedOnRuntimeUpgrade`], migrating the state of this pallet from V0 to V1. /// -/// Should only be used by the [`VersionedMigration`](frame_support::migrations::VersionedMigration) -/// type in this module to create something to export. +/// In V0 of the template [`crate::Value`] is just a `u32`. In V1, it has been upgraded to +/// contain the struct [`crate::CurrentAndPreviousValue`]. /// -/// The unversioned migration should be kept private so the unversioned migration cannot -/// accidentally be used in any runtimes. -/// -/// For more about this pattern of keeping items private, see -/// - -/// - -mod version_unchecked { - use super::*; +/// In this migration, update the on-chain storage for the pallet to reflect the new storage +/// layout. +pub struct InnerMigrateV0ToV1(sp_std::marker::PhantomData); + +impl UncheckedOnRuntimeUpgrade for InnerMigrateV0ToV1 { + /// Return the existing [`crate::Value`] so we can check that it was correctly set in + /// `InnerMigrateV0ToV1::post_upgrade`. + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + use codec::Encode; + + // Access the old value using the `storage_alias` type + let old_value = v0::Value::::get(); + // Return it as an encoded `Vec` + Ok(old_value.encode()) + } - /// Implements [`OnRuntimeUpgrade`], migrating the state of this pallet from V0 to V1. - /// - /// In V0 of the template [`crate::Value`] is just a `u32`. In V1, it has been upgraded to - /// contain the struct [`crate::CurrentAndPreviousValue`]. + /// Migrate the storage from V0 to V1. /// - /// In this migration, update the on-chain storage for the pallet to reflect the new storage - /// layout. - pub struct MigrateV0ToV1(sp_std::marker::PhantomData); - - impl OnRuntimeUpgrade for MigrateV0ToV1 { - /// Return the existing [`crate::Value`] so we can check that it was correctly set in - /// `version_unchecked::MigrateV0ToV1::post_upgrade`. - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { - use codec::Encode; - - // Access the old value using the `storage_alias` type - let old_value = v0::Value::::get(); - // Return it as an encoded `Vec` - Ok(old_value.encode()) - } - - /// Migrate the storage from V0 to V1. - /// - /// - If the value doesn't exist, there is nothing to do. - /// - If the value exists, it is read and then written back to storage inside a - /// [`crate::CurrentAndPreviousValue`]. 
- fn on_runtime_upgrade() -> frame_support::weights::Weight { - // Read the old value from storage - if let Some(old_value) = v0::Value::::take() { - // Write the new value to storage - let new = crate::CurrentAndPreviousValue { current: old_value, previous: None }; - crate::Value::::put(new); - // One read for the old value, one write for the new value - T::DbWeight::get().reads_writes(1, 1) - } else { - // One read for trying to access the old value - T::DbWeight::get().reads(1) - } + /// - If the value doesn't exist, there is nothing to do. + /// - If the value exists, it is read and then written back to storage inside a + /// [`crate::CurrentAndPreviousValue`]. + fn on_runtime_upgrade() -> frame_support::weights::Weight { + // Read the old value from storage + if let Some(old_value) = v0::Value::::take() { + // Write the new value to storage + let new = crate::CurrentAndPreviousValue { current: old_value, previous: None }; + crate::Value::::put(new); + // One read for the old value, one write for the new value + T::DbWeight::get().reads_writes(1, 1) + } else { + // One read for trying to access the old value + T::DbWeight::get().reads(1) } + } - /// Verifies the storage was migrated correctly. - /// - /// - If there was no old value, the new value should not be set. - /// - If there was an old value, the new value should be a - /// [`crate::CurrentAndPreviousValue`]. - #[cfg(feature = "try-runtime")] - fn post_upgrade(state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { - use codec::Decode; - use frame_support::ensure; - - let maybe_old_value = Option::::decode(&mut &state[..]).map_err(|_| { - sp_runtime::TryRuntimeError::Other("Failed to decode old value from storage") - })?; - - match maybe_old_value { - Some(old_value) => { - let expected_new_value = - crate::CurrentAndPreviousValue { current: old_value, previous: None }; - let actual_new_value = crate::Value::::get(); - - ensure!(actual_new_value.is_some(), "New value not set"); - ensure!( - actual_new_value == Some(expected_new_value), - "New value not set correctly" - ); - }, - None => { - ensure!(crate::Value::::get().is_none(), "New value unexpectedly set"); - }, - }; - Ok(()) - } + /// Verifies the storage was migrated correctly. + /// + /// - If there was no old value, the new value should not be set. + /// - If there was an old value, the new value should be a [`crate::CurrentAndPreviousValue`]. + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + use codec::Decode; + use frame_support::ensure; + + let maybe_old_value = Option::::decode(&mut &state[..]).map_err(|_| { + sp_runtime::TryRuntimeError::Other("Failed to decode old value from storage") + })?; + + match maybe_old_value { + Some(old_value) => { + let expected_new_value = + crate::CurrentAndPreviousValue { current: old_value, previous: None }; + let actual_new_value = crate::Value::::get(); + + ensure!(actual_new_value.is_some(), "New value not set"); + ensure!( + actual_new_value == Some(expected_new_value), + "New value not set correctly" + ); + }, + None => { + ensure!(crate::Value::::get().is_none(), "New value unexpectedly set"); + }, + }; + Ok(()) } } -/// Public module containing *version checked* migration logic. -/// -/// This is the only module that should be exported from this module. -/// -/// See [`VersionedMigration`](frame_support::migrations::VersionedMigration) docs for more about -/// how it works. 
-pub mod versioned { - use super::*; - - /// `version_unchecked::MigrateV0ToV1` wrapped in a - /// [`VersionedMigration`](frame_support::migrations::VersionedMigration), which ensures that: - /// - The migration only runs once when the on-chain storage version is 0 - /// - The on-chain storage version is updated to `1` after the migration executes - /// - Reads/Writes from checking/settings the on-chain storage version are accounted for - pub type MigrateV0ToV1 = frame_support::migrations::VersionedMigration< - 0, // The migration will only execute when the on-chain storage version is 0 - 1, // The on-chain storage version will be set to 1 after the migration is complete - version_unchecked::MigrateV0ToV1, - crate::pallet::Pallet, - ::DbWeight, - >; -} +/// [`UncheckedOnRuntimeUpgrade`] implementation [`InnerMigrateV0ToV1`] wrapped in a +/// [`VersionedMigration`](frame_support::migrations::VersionedMigration), which ensures that: +/// - The migration only runs once when the on-chain storage version is 0 +/// - The on-chain storage version is updated to `1` after the migration executes +/// - Reads/Writes from checking/setting the on-chain storage version are accounted for +pub type MigrateV0ToV1 = frame_support::migrations::VersionedMigration< + 0, // The migration will only execute when the on-chain storage version is 0 + 1, // The on-chain storage version will be set to 1 after the migration is complete + InnerMigrateV0ToV1, + crate::pallet::Pallet, + ::DbWeight, +>; /// Tests for our migration. /// @@ -155,10 +129,10 @@ pub mod versioned { /// 3. The storage is in the expected state after the migration #[cfg(any(all(feature = "try-runtime", test), doc))] mod test { + use self::InnerMigrateV0ToV1; use super::*; use crate::mock::{new_test_ext, MockRuntime}; use frame_support::assert_ok; - use version_unchecked::MigrateV0ToV1; #[test] fn handles_no_existing_value() { @@ -168,16 +142,16 @@ mod test { assert!(v0::Value::::get().is_none()); // Get the pre_upgrade bytes - let bytes = match MigrateV0ToV1::::pre_upgrade() { + let bytes = match InnerMigrateV0ToV1::::pre_upgrade() { Ok(bytes) => bytes, Err(e) => panic!("pre_upgrade failed: {:?}", e), }; // Execute the migration - let weight = MigrateV0ToV1::::on_runtime_upgrade(); + let weight = InnerMigrateV0ToV1::::on_runtime_upgrade(); // Verify post_upgrade succeeds - assert_ok!(MigrateV0ToV1::::post_upgrade(bytes)); + assert_ok!(InnerMigrateV0ToV1::::post_upgrade(bytes)); // The weight should be just 1 read for trying to access the old value. assert_eq!(weight, ::DbWeight::get().reads(1)); @@ -195,16 +169,16 @@ mod test { v0::Value::::put(initial_value); // Get the pre_upgrade bytes - let bytes = match MigrateV0ToV1::::pre_upgrade() { + let bytes = match InnerMigrateV0ToV1::::pre_upgrade() { Ok(bytes) => bytes, Err(e) => panic!("pre_upgrade failed: {:?}", e), }; // Execute the migration - let weight = MigrateV0ToV1::::on_runtime_upgrade(); + let weight = InnerMigrateV0ToV1::::on_runtime_upgrade(); // Verify post_upgrade succeeds - assert_ok!(MigrateV0ToV1::::post_upgrade(bytes)); + assert_ok!(InnerMigrateV0ToV1::::post_upgrade(bytes)); // The weight used should be 1 read for the old value, and 1 write for the new // value.
diff --git a/substrate/frame/grandpa/src/migrations/v5.rs b/substrate/frame/grandpa/src/migrations/v5.rs index 24cfc34104b..a0865a3f2bf 100644 --- a/substrate/frame/grandpa/src/migrations/v5.rs +++ b/substrate/frame/grandpa/src/migrations/v5.rs @@ -20,7 +20,7 @@ use codec::Decode; use frame_support::{ migrations::VersionedMigration, storage, - traits::{Get, OnRuntimeUpgrade}, + traits::{Get, UncheckedOnRuntimeUpgrade}, weights::Weight, }; use sp_consensus_grandpa::AuthorityList; @@ -36,9 +36,9 @@ fn load_authority_list() -> AuthorityList { } /// Actual implementation of [`MigrateV4ToV5`]. -pub struct MigrateImpl(PhantomData); +pub struct UncheckedMigrateImpl(PhantomData); -impl OnRuntimeUpgrade for MigrateImpl { +impl UncheckedOnRuntimeUpgrade for UncheckedMigrateImpl { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { use codec::Encode; @@ -92,5 +92,10 @@ impl OnRuntimeUpgrade for MigrateImpl { /// Migrate the storage from V4 to V5. /// /// Switches from `GRANDPA_AUTHORITIES_KEY` to a normal FRAME storage item. -pub type MigrateV4ToV5 = - VersionedMigration<4, 5, MigrateImpl, Pallet, ::DbWeight>; +pub type MigrateV4ToV5 = VersionedMigration< + 4, + 5, + UncheckedMigrateImpl, + Pallet, + ::DbWeight, +>; diff --git a/substrate/frame/identity/src/migration.rs b/substrate/frame/identity/src/migration.rs index 88ac08d1bf5..8725bfd39df 100644 --- a/substrate/frame/identity/src/migration.rs +++ b/substrate/frame/identity/src/migration.rs @@ -16,7 +16,9 @@ //! Storage migrations for the Identity pallet. use super::*; -use frame_support::{migrations::VersionedMigration, pallet_prelude::*, traits::OnRuntimeUpgrade}; +use frame_support::{ + migrations::VersionedMigration, pallet_prelude::*, traits::UncheckedOnRuntimeUpgrade, +}; #[cfg(feature = "try-runtime")] use codec::{Decode, Encode}; @@ -66,7 +68,7 @@ pub mod v1 { /// prevent stalling a parachain by accumulating too much weight in the migration. To have an /// unlimited migration (e.g. in a chain without PoV limits), set this to `u64::MAX`. 
pub struct VersionUncheckedMigrateV0ToV1(PhantomData); - impl OnRuntimeUpgrade for VersionUncheckedMigrateV0ToV1 { + impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateV0ToV1 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, TryRuntimeError> { let identities = v0::IdentityOf::::iter().count(); diff --git a/substrate/frame/nomination-pools/src/migration.rs b/substrate/frame/nomination-pools/src/migration.rs index ca9c0874a83..796b310862a 100644 --- a/substrate/frame/nomination-pools/src/migration.rs +++ b/substrate/frame/nomination-pools/src/migration.rs @@ -17,7 +17,7 @@ use super::*; use crate::log; -use frame_support::traits::OnRuntimeUpgrade; +use frame_support::traits::{OnRuntimeUpgrade, UncheckedOnRuntimeUpgrade}; use sp_std::{collections::btree_map::BTreeMap, vec::Vec}; #[cfg(feature = "try-runtime")] @@ -132,7 +132,7 @@ pub mod v8 { } pub struct VersionUncheckedMigrateV7ToV8(sp_std::marker::PhantomData); - impl OnRuntimeUpgrade for VersionUncheckedMigrateV7ToV8 { + impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateV7ToV8 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, TryRuntimeError> { Ok(Vec::new()) @@ -211,7 +211,7 @@ pub(crate) mod v7 { CountedStorageMap, Twox64Concat, PoolId, V7BondedPoolInner>; pub struct VersionUncheckedMigrateV6ToV7(sp_std::marker::PhantomData); - impl OnRuntimeUpgrade for VersionUncheckedMigrateV6ToV7 { + impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateV6ToV7 { fn on_runtime_upgrade() -> Weight { let migrated = BondedPools::::count(); // The TVL should be the sum of all the funds that are actively staked and in the @@ -282,7 +282,7 @@ mod v6 { }) } } - impl OnRuntimeUpgrade for MigrateToV6 { + impl UncheckedOnRuntimeUpgrade for MigrateToV6 { fn on_runtime_upgrade() -> Weight { let mut success = 0u64; let mut fail = 0u64; diff --git a/substrate/frame/society/src/migrations.rs b/substrate/frame/society/src/migrations.rs index 8fd87b1163a..7ded1f84f58 100644 --- a/substrate/frame/society/src/migrations.rs +++ b/substrate/frame/society/src/migrations.rs @@ -19,7 +19,7 @@ use super::*; use codec::{Decode, Encode}; -use frame_support::traits::{Defensive, DefensiveOption, Instance, OnRuntimeUpgrade}; +use frame_support::traits::{Defensive, DefensiveOption, Instance, UncheckedOnRuntimeUpgrade}; #[cfg(feature = "try-runtime")] use sp_runtime::TryRuntimeError; @@ -36,7 +36,7 @@ impl< T: Config, I: Instance + 'static, PastPayouts: Get::AccountId, BalanceOf)>>, - > OnRuntimeUpgrade for VersionUncheckedMigrateToV2 + > UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateToV2 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, TryRuntimeError> { diff --git a/substrate/frame/support/src/migrations.rs b/substrate/frame/support/src/migrations.rs index 2ceab44cb16..b8cbcd69048 100644 --- a/substrate/frame/support/src/migrations.rs +++ b/substrate/frame/support/src/migrations.rs @@ -41,19 +41,19 @@ use sp_std::{marker::PhantomData, vec::Vec}; /// It takes 5 type parameters: /// - `From`: The version being upgraded from. /// - `To`: The version being upgraded to. -/// - `Inner`: An implementation of `OnRuntimeUpgrade`. +/// - `Inner`: An implementation of `UncheckedOnRuntimeUpgrade`. /// - `Pallet`: The Pallet being upgraded. /// - `Weight`: The runtime's RuntimeDbWeight implementation. /// /// When a [`VersionedMigration`] `on_runtime_upgrade`, `pre_upgrade`, or `post_upgrade` method is /// called, the on-chain version of the pallet is compared to `From`. 
If they match, the `Inner` -/// equivalent is called and the pallets on-chain version is set to `To` after the migration. -/// Otherwise, a warning is logged notifying the developer that the upgrade was a noop and should -/// probably be removed. +/// `UncheckedOnRuntimeUpgrade` is called and the pallet's on-chain version is set to `To` +/// after the migration. Otherwise, a warning is logged notifying the developer that the upgrade was +/// a noop and should probably be removed. /// -/// It is STRONGLY RECOMMENDED to write the unversioned migration logic in a private module and -/// only export the versioned migration logic to prevent accidentally using the unversioned -/// migration in any runtimes. +/// By not bounding `Inner` with `OnRuntimeUpgrade`, we prevent developers from +/// accidentally using the unchecked version of the migration in a runtime upgrade instead of +/// [`VersionedMigration`]. /// /// ### Examples /// ```ignore @@ -71,9 +71,9 @@ use sp_std::{marker::PhantomData, vec::Vec}; /// /// - https://internals.rust-lang.org/t/lang-team-minutes-private-in-public-rules/4504/40 /// mod version_unchecked { /// use super::*; -/// pub struct VersionUncheckedMigrateV5ToV6(sp_std::marker::PhantomData); -/// impl OnRuntimeUpgrade for VersionUncheckedMigrateV5ToV6 { -/// // OnRuntimeUpgrade implementation... +/// pub struct VersionUncheckedMigrateV5ToV6(sp_std::marker::PhantomData); +/// impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateV5ToV6 { +/// // `UncheckedOnRuntimeUpgrade` implementation... /// } /// } /// @@ -116,7 +116,7 @@ pub enum VersionedPostUpgradeData { impl< const FROM: u16, const TO: u16, - Inner: crate::traits::OnRuntimeUpgrade, + Inner: crate::traits::UncheckedOnRuntimeUpgrade, Pallet: GetStorageVersion + PalletInfoAccess, DbWeight: Get, > crate::traits::OnRuntimeUpgrade for VersionedMigration diff --git a/substrate/frame/support/src/traits.rs b/substrate/frame/support/src/traits.rs index 24e7e1c8a65..66777cef7b8 100644 --- a/substrate/frame/support/src/traits.rs +++ b/substrate/frame/support/src/traits.rs @@ -87,7 +87,7 @@ pub use hooks::GenesisBuild; pub use hooks::{ BeforeAllRuntimeMigrations, BuildGenesisConfig, Hooks, IntegrityTest, OnFinalize, OnGenesis, OnIdle, OnInitialize, OnPoll, OnRuntimeUpgrade, OnTimestampSet, PostInherents, - PostTransactions, PreInherents, + PostTransactions, PreInherents, UncheckedOnRuntimeUpgrade, }; pub mod schedule; diff --git a/substrate/frame/support/src/traits/hooks.rs b/substrate/frame/support/src/traits/hooks.rs index d83e2704745..ccccc506328 100644 --- a/substrate/frame/support/src/traits/hooks.rs +++ b/substrate/frame/support/src/traits/hooks.rs @@ -227,6 +227,30 @@ pub trait OnRuntimeUpgrade { } } +/// This trait is intended for use within `VersionedMigration` to execute storage migrations without +/// automatic version checks. Implementations should ensure migration logic is safe and idempotent. +pub trait UncheckedOnRuntimeUpgrade { + /// Called within `VersionedMigration` to execute the actual migration. It is also + /// expected that no version checks are performed within this function. + /// + /// See also [`Hooks::on_runtime_upgrade`]. + fn on_runtime_upgrade() -> Weight { + Weight::zero() + } + + /// See [`Hooks::pre_upgrade`]. + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, TryRuntimeError> { + Ok(Vec::new()) + } + + /// See [`Hooks::post_upgrade`].
+ #[cfg(feature = "try-runtime")] + fn post_upgrade(_state: Vec) -> Result<(), TryRuntimeError> { + Ok(()) + } +} + #[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] #[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] #[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] @@ -459,7 +483,9 @@ pub trait Hooks { /// ## Implementation Note: Standalone Migrations /// /// Additional migrations can be created by directly implementing [`OnRuntimeUpgrade`] on - /// structs and passing them to `Executive`. + /// structs and passing them to `Executive`. Or alternatively, by implementing + /// [`UncheckedOnRuntimeUpgrade`], passing it to [`crate::migrations::VersionedMigration`], + /// which already implements [`OnRuntimeUpgrade`]. /// /// ## Implementation Note: Pallet Versioning /// diff --git a/substrate/frame/support/test/tests/versioned_migration.rs b/substrate/frame/support/test/tests/versioned_migration.rs index 3fdfb902129..e7d146940cb 100644 --- a/substrate/frame/support/test/tests/versioned_migration.rs +++ b/substrate/frame/support/test/tests/versioned_migration.rs @@ -23,7 +23,7 @@ use frame_support::{ construct_runtime, derive_impl, migrations::VersionedMigration, parameter_types, - traits::{GetStorageVersion, OnRuntimeUpgrade, StorageVersion}, + traits::{GetStorageVersion, OnRuntimeUpgrade, StorageVersion, UncheckedOnRuntimeUpgrade}, weights::constants::RocksDbWeight, }; use frame_system::Config; @@ -103,9 +103,11 @@ parameter_types! { static PostUpgradeCalledWith: Vec = Vec::new(); } -/// Implement `OnRuntimeUpgrade` for `SomeUnversionedMigration`. +/// Implement `UncheckedOnRuntimeUpgrade` for `SomeUnversionedMigration`. /// It sets SomeStorage to S, and returns a weight derived from UpgradeReads and UpgradeWrites. -impl OnRuntimeUpgrade for SomeUnversionedMigration { +impl UncheckedOnRuntimeUpgrade + for SomeUnversionedMigration +{ fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { PreUpgradeCalled::set(true); Ok(PreUpgradeReturnBytes::get().to_vec()) diff --git a/substrate/frame/uniques/src/migration.rs b/substrate/frame/uniques/src/migration.rs index ba0855a6bb6..90d44e7790d 100644 --- a/substrate/frame/uniques/src/migration.rs +++ b/substrate/frame/uniques/src/migration.rs @@ -18,15 +18,15 @@ //! Various pieces of common functionality. use super::*; use core::marker::PhantomData; -use frame_support::traits::{Get, OnRuntimeUpgrade}; +use frame_support::traits::{Get, UncheckedOnRuntimeUpgrade}; mod v1 { use super::*; /// Actual implementation of the storage migration. 
- pub struct MigrateToV1Impl(PhantomData<(T, I)>); + pub struct UncheckedMigrateToV1Impl(PhantomData<(T, I)>); - impl, I: 'static> OnRuntimeUpgrade for MigrateToV1Impl { + impl, I: 'static> UncheckedOnRuntimeUpgrade for UncheckedMigrateToV1Impl { fn on_runtime_upgrade() -> frame_support::weights::Weight { let mut count = 0; for (collection, detail) in Collection::::iter() { @@ -49,7 +49,7 @@ mod v1 { pub type MigrateV0ToV1 = frame_support::migrations::VersionedMigration< 0, 1, - v1::MigrateToV1Impl, + v1::UncheckedMigrateToV1Impl, Pallet, ::DbWeight, >; -- GitLab From 5d9826c2620aff205811edf0e6a07b55a52cbf50 Mon Sep 17 00:00:00 2001 From: Clara van Staden Date: Tue, 2 Apr 2024 15:53:05 +0200 Subject: [PATCH 085/128] Snowbridge: Synchronize from Snowfork repository (#3761) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR includes the following 2 improvements: ## Ethereum Client Author: @yrong ### Original Upstream PRs - https://github.com/Snowfork/polkadot-sdk/pull/123 - https://github.com/Snowfork/polkadot-sdk/pull/125 ### Description The Ethereum client syncs beacon headers as they are finalized, and imports every execution header. When a message is received, it is verified against the imported execution header. This is unnecessary, since the execution header can be sent with the message as proof. The recent Deneb Ethereum upgrade made it easier to locate the relevant beacon header from an execution header, which made this improvement possible. This resolves a concern @svyatonik had in our initial Rococo PR: https://github.com/paritytech/polkadot-sdk/pull/2522#discussion_r1431270691 ## Inbound Queue Author: @yrong ### Original Upstream PR - https://github.com/Snowfork/polkadot-sdk/pull/118 ### Description When the AH sovereign account (which pays relayer rewards) is depleted, the inbound message will not fail. The relayer just will not receive rewards. Both these changes were done by @yrong, many thanks.
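As background for the fixture changes below, here is a rough sketch of the proof bundle that now travels with each inbound message (the types are those imported by the updated fixtures; the helper function is hypothetical and the field values are elided):

```rust
use snowbridge_beacon_primitives::{
    AncestryProof, BeaconHeader, ExecutionProof, VersionedExecutionPayloadHeader,
};

/// Hypothetical helper showing what a relayer now bundles alongside a message:
/// the beacon header, an ancestry proof linking it to a finalized header, and
/// the execution payload header. The on-chain light client verifies this on
/// the fly, so it no longer needs to import every execution header in advance.
fn bundle_execution_proof(
    header: BeaconHeader,
    ancestry_proof: Option<AncestryProof>,
    execution_header: VersionedExecutionPayloadHeader,
) -> ExecutionProof {
    ExecutionProof { header, ancestry_proof, execution_header }
}
```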
❤️ --------- Co-authored-by: claravanstaden Co-authored-by: Ron Co-authored-by: Vincent Geddes Co-authored-by: Svyatoslav Nikolsky --- .../ethereum-client/fixtures/src/lib.rs | 272 ++++++---- .../ethereum-client/src/benchmarking/mod.rs | 18 - .../pallets/ethereum-client/src/impls.rs | 138 +++-- .../pallets/ethereum-client/src/lib.rs | 237 +-------- .../pallets/ethereum-client/src/mock.rs | 61 +-- .../pallets/ethereum-client/src/tests.rs | 474 +++++------------- .../pallets/ethereum-client/src/types.rs | 12 +- .../pallets/ethereum-client/src/weights.rs | 7 - .../fixtures/execution-header-update.json | 54 -- .../tests/fixtures/execution-proof.json | 54 ++ .../fixtures/finalized-header-update.json | 48 +- .../tests/fixtures/inbound-message.json | 94 +++- .../tests/fixtures/initial-checkpoint.json | 30 +- .../tests/fixtures/sync-committee-update.json | 42 +- .../pallets/inbound-queue/fixtures/src/lib.rs | 11 - .../fixtures/src/register_token.rs | 84 +++- .../register_token_with_insufficient_fee.rs | 42 -- .../inbound-queue/fixtures/src/send_token.rs | 82 ++- .../fixtures/src/send_token_to_penpal.rs | 86 +++- .../inbound-queue/src/benchmarking/mod.rs | 4 +- .../pallets/inbound-queue/src/lib.rs | 25 +- .../pallets/inbound-queue/src/mock.rs | 45 +- .../pallets/inbound-queue/src/test.rs | 83 ++- .../snowbridge/primitives/beacon/src/lib.rs | 8 +- .../snowbridge/primitives/beacon/src/types.rs | 100 ++-- .../primitives/beacon/src/updates.rs | 47 +- .../snowbridge/primitives/core/src/inbound.rs | 18 +- .../snowbridge/runtime/test-common/src/lib.rs | 22 - .../snowbridge/scripts/contribute-upstream.sh | 6 + .../bridge-hub-rococo/src/tests/snowbridge.rs | 148 +++--- .../assets/asset-hub-rococo/src/xcm_config.rs | 4 +- .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 13 +- .../snowbridge_pallet_ethereum_client.rs | 22 - .../snowbridge_pallet_inbound_queue.rs | 14 +- prdoc/pr_3761.prdoc | 25 + 35 files changed, 1123 insertions(+), 1307 deletions(-) delete mode 100755 bridges/snowbridge/pallets/ethereum-client/tests/fixtures/execution-header-update.json create mode 100755 bridges/snowbridge/pallets/ethereum-client/tests/fixtures/execution-proof.json delete mode 100644 bridges/snowbridge/pallets/inbound-queue/fixtures/src/register_token_with_insufficient_fee.rs create mode 100644 prdoc/pr_3761.prdoc diff --git a/bridges/snowbridge/pallets/ethereum-client/fixtures/src/lib.rs b/bridges/snowbridge/pallets/ethereum-client/fixtures/src/lib.rs index facaffb8149..37fe45ba60b 100644 --- a/bridges/snowbridge/pallets/ethereum-client/fixtures/src/lib.rs +++ b/bridges/snowbridge/pallets/ethereum-client/fixtures/src/lib.rs @@ -6,9 +6,10 @@ use hex_literal::hex; use snowbridge_beacon_primitives::{ - types::deneb, updates::AncestryProof, BeaconHeader, ExecutionHeaderUpdate, - NextSyncCommitteeUpdate, SyncAggregate, SyncCommittee, VersionedExecutionPayloadHeader, + types::deneb, AncestryProof, BeaconHeader, ExecutionProof, NextSyncCommitteeUpdate, + SyncAggregate, SyncCommittee, VersionedExecutionPayloadHeader, }; +use snowbridge_core::inbound::{InboundQueueFixture, Log, Message, Proof}; use sp_core::U256; use sp_std::{boxed::Box, vec}; @@ -20,11 +21,11 @@ type Update = snowbridge_beacon_primitives::Update; pub fn make_checkpoint() -> Box { Box::new(CheckpointUpdate { header: BeaconHeader { - slot: 2496, - proposer_index: 2, - parent_root: hex!("c99e49787106733eeebab4d93eb326e1f2214575c9d928f0c4ab0da0776f1622").into(), - state_root: hex!("fbf8a08c86ef36bd173e37e733da4a78aa8e85fee99a990e858dd12a59087fde").into(), - body_root: 
hex!("a2a8ad06901447b2807a9059580a4c40d8a941f325b1343c69f7c7c6c90e4ab0").into(), + slot: 864, + proposer_index: 4, + parent_root: hex!("614e7672f991ac268cd841055973f55e1e42228831a211adef207bb7329be614").into(), + state_root: hex!("5fa8dfca3d760e4242ab46d529144627aa85348a19173b6e081172c701197a4a").into(), + body_root: hex!("0f34c083b1803666bb1ac5e73fa71582731a2cf37d279ff0a3b0cad5a2ff371e").into(), }, current_sync_committee: SyncCommittee { pubkeys: [ @@ -544,20 +545,20 @@ pub fn make_checkpoint() -> Box { aggregate_pubkey: hex!("8fbd66eeec2ff69ef0b836f04b1d67d88bcd4dfd495061964ad757c77abe822a39fa1cd8ed0d4d9bc9276cea73fd745c").into(), }, current_sync_committee_branch: vec![ - hex!("3ade38d498a062b50880a9409e1ca3a7fd4315d91eeb3bb83e56ac6bfe8d6a59").into(), - hex!("93880225bf99a0c5ec22b266ff829837754e9c5edf37a68c05b8f803fd82fa45").into(), - hex!("4c60656ec9a95fcf11030ad309c716b5b15beb7f60a0bcfc7c9d4eff505472ff").into(), - hex!("22d1645fceb4bf9a695043dda19a53e784ec70df6a6b1bd66ea30eba1cca5f2f").into(), - hex!("a8fc6cad84ceefc633ec56c2d031d525e1cb4b51c70eb252919fce5bba9a1fde").into(), + hex!("3ade38d498a062b50880a9409e1ca3a7fd4315d91eeb3bb83e56ac6bfe8d6a59").into(), + hex!("a9e90f89e7f90fd5d79a6bbcaf40ba5cfc05ab1b561ac51c84867c32248d5b1e").into(), + hex!("bd1a76b03e02402bb24a627de1980a80ab17691980271f597b844b89b497ef75").into(), + hex!("07bbcd27c7cad089023db046eda17e8209842b7d97add8b873519e84fe6480e7").into(), + hex!("94c11eeee4cb6192bf40810f23486d8c75dfbc2b6f28d988d6f74435ede243b0").into(), ], validators_root: hex!("270d43e74ce340de4bca2b1936beca0f4f5408d9e78aec4850920baf659d5b69").into(), - block_roots_root: hex!("d160b7687041891b73e54b06fc4e04f82d0fa8fdd76705895e216c6b24709dfe").into(), + block_roots_root: hex!("b9aab9c388c4e4fcd899b71f62c498fc73406e38e8eb14aa440e9affa06f2a10").into(), block_roots_branch: vec![ - hex!("105290e42d98ab6a0ada6e55453cede36c672abf645eeb986b88d7487616e135").into(), - hex!("9da41f274bcdf6122335443d9ce94d07163b48dba3e2f9499ff56f4e48b48b99").into(), - hex!("ecea7e1d3152d8130e83afdfe34b4de4ba2b69a33c9471991096daf454de9cf5").into(), - hex!("b2bf1758e50b2bfff29169fbc70fdb884b2b05bb615dbc53567574da6f4f1ae2").into(), - hex!("cd87069daf70975779126d6af833b7d636c75ca4d5e750ebcad0e76408a5e5bf").into(), + hex!("733422bd810895dab74cbbe07c69dd440cbb51f573181ad4dddac30fcdd0f41f").into(), + hex!("9b9eca73ab01d14549c325ba1b4610bb20bf1f8ec2dbd649f9d8cc7f3cea75fa").into(), + hex!("bcc666ad0ad9f9725cbd682bc95589d35b1b53b2a615f1e6e8dd5e086336becf").into(), + hex!("3069b547a08f703a1715016e926cbd64e71f93f64fb68d98d8c8f1ab745c46e5").into(), + hex!("c2de7e1097239404e17b263cfa0473533cc41e903cb03440d633bc5c27314cb4").into(), ], }) } @@ -567,13 +568,13 @@ pub fn make_sync_committee_update() -> Box { attested_header: BeaconHeader { slot: 129, proposer_index: 5, - parent_root: hex!("e32b6c18f029e755b0273dc1c4fa2bc4979794c8286ad40276c1b8a8e36049d8").into(), - state_root: hex!("5ec9dacf25a5f09f20be0c59246b3d8dcfe64bd085b4bac5cec180690339801e").into(), - body_root: hex!("4080cf2412d6ff77fc3164ad6155423a7112f207f173145ec16371a93f481f87").into(), + parent_root: hex!("c2def03fe44a2802130ca1a6d8406e4ccf4f344fec7075d4d84431cd4a8b0904").into(), + state_root: hex!("fa62cde6666add7353d7aedcb61ebe3c6c84b5361e34f814825b1250affb5be4").into(), + body_root: hex!("0f9c69f243fe7b5fa5860396c66c720a9e8b1e526e7914188930497cc4a9134c").into(), }, sync_aggregate: SyncAggregate{ sync_committee_bits: 
hex!("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), - sync_committee_signature: hex!("a761c3333fbb3d36bc8f65454f898da38001499dcd37494cf3d86940a995399ae649216ba4c985af154f83f72c8b1856079b7636a7a8d7d3f7602df2cbf699edb72b65253e82de4d9cc4db7377eafb22f799129f63f094a21c00675bdd5cc243").into(), + sync_committee_signature: hex!("810cfde2afea3e276256c09bdf1cd321c33dcadeefddcfd24f488e6f756d917cfda90b5b437b3a4b4ef880985afa28a40cf565ec0a82877ddee36adc01d55d9d4a911ae3e22556e4c2636f1c707366fba019fb49450440fcd263d0b054b04bf0").into(), }, signature_slot: 130, next_sync_committee_update: Some(NextSyncCommitteeUpdate { @@ -1096,34 +1097,34 @@ pub fn make_sync_committee_update() -> Box { }, next_sync_committee_branch: vec![ hex!("3ade38d498a062b50880a9409e1ca3a7fd4315d91eeb3bb83e56ac6bfe8d6a59").into(), - hex!("fd1e5ff5d4a15081efe3ff17857b1f95984c9a271b1c41c2f81f43e60c2cc541").into(), - hex!("e1c97f93bb7352d395d1ff8ee29881572cb7eb5d71634783701171dcd30cd93d").into(), - hex!("77fa2170ddbd89b15dae02f2e6cf9f76c8e00d1c4217320acffbe01576d0da61").into(), - hex!("e97288e0627219087a024078d69445f34f0583a6350a7c3c40c39fd1fa6f8d68").into(), + hex!("43276bee17fc9fba3f4866e902f0e5b5b308d79db91154bb8bf819973837a7d9").into(), + hex!("5572348e13ce59446ca0ea7cfeed07579da05f121920a76559e19bda94dd81cd").into(), + hex!("2d58adca9f3c742530de037f1933d6de1920ea4b68581613d4bc32b71547f221").into(), + hex!("7072b3c6577cd5a89b3234968f316f54630bb97eafbdb59e5b61637a9640255f").into(), ], }), finalized_header: BeaconHeader{ slot: 64, proposer_index: 4, - parent_root: hex!("0f7bc2353778c14c7f6dba0fc5fe6eec87228b0d3a5447b61dce67b4d9338de3").into(), - state_root: hex!("feb990de653ce494c0a263f820eaf05a9300dbdc30cb6065ede602827bfccde4").into(), - body_root: hex!("f5235cd8c24f2695fc5b7989926305c10ad8cf5a87d62a739f675f5543df2ec1").into(), + parent_root: hex!("a876486aaad7ddb897f369fd22d0a9903cd61d00c9e0dfe7998dd68d1008c678").into(), + state_root: hex!("818e21c3388575f8ccc9ff17ec79d5a57915bcd31bccf47770f65a18e068416b").into(), + body_root: hex!("1d1f73b864b3bb7e11ff91b56ca1381e0f9ca8122b2c542db88243604c763019").into(), }, finality_branch: vec![ hex!("0200000000000000000000000000000000000000000000000000000000000000").into(), hex!("10c726fac935bf9657cc7476d3cfa7bedec5983dcfb59e8a7df6d0a619e108d7").into(), hex!("98e9116c6bb7f20de18800dc63e73e689d06d6a47d35b5e2b32cf093d475840d").into(), - hex!("e1c97f93bb7352d395d1ff8ee29881572cb7eb5d71634783701171dcd30cd93d").into(), - hex!("77fa2170ddbd89b15dae02f2e6cf9f76c8e00d1c4217320acffbe01576d0da61").into(), - hex!("e97288e0627219087a024078d69445f34f0583a6350a7c3c40c39fd1fa6f8d68").into(), + hex!("5572348e13ce59446ca0ea7cfeed07579da05f121920a76559e19bda94dd81cd").into(), + hex!("2d58adca9f3c742530de037f1933d6de1920ea4b68581613d4bc32b71547f221").into(), + hex!("7072b3c6577cd5a89b3234968f316f54630bb97eafbdb59e5b61637a9640255f").into(), ], - block_roots_root: hex!("6fcdfd1c3fb1bdd421fe59dddfff3855b5ed5e30373887991a0059d019ad12bc").into(), + block_roots_root: hex!("715b08694bef183a6d94b3113d16a7129f89fc3edec85a7e0eaf6ef9153552ef").into(), block_roots_branch: vec![ - hex!("94b59531f172bc24f914bc0c10104ccb158676850f8cc3b47b6ddb7f096ebdd7").into(), - hex!("22470ed9155a938587d44d5fa19217c0f939d8862e504e67cd8cb4d1b960795e").into(), - hex!("feec3ef1a68f93849e71e84f90b99602cccc31868137b6887ca8244a4b979e8e").into(), + hex!("4028c72c71b6ce80ea7d18b2c9471f4e4fa39746261a9921e832a4a2f9bdf7bb").into(), + 
hex!("75f98062661785d3290b7bd998b64446582baa49210733fd4603e1a97cd45a44").into(), + hex!("6fb757f44052f30c464810f01b0132adfa1a5446d8715b41e9af88eee1ee3e65").into(), hex!("5340ad5877c72dca689ca04bc8fedb78d67a4801d99887937edd8ccd29f87e82").into(), - hex!("f5ff4b0c6190005015889879568f5f0d9c40134c7ec4ffdda47950dcd92395ad").into(), + hex!("f2b3cb56753939a728ccad399a434ca490f018f2f331529ec0d8b2d59c509271").into(), ], }) } @@ -1131,95 +1132,180 @@ pub fn make_sync_committee_update() -> Box { pub fn make_finalized_header_update() -> Box { Box::new(Update { attested_header: BeaconHeader { - slot: 2566, - proposer_index: 6, - parent_root: hex!("6eb9f13a2c496318ce1ab3087bbd872f5c9519a1a7ca8231a2453e3cb523af00").into(), - state_root: hex!("c8cb12766113dff7e46d2917267bf33d0626d99dd47715fcdbc5c65fad3c04b4").into(), - body_root: hex!("d8cfd0d7bc9bc3724417a1655bb0a67c0765ca36197320f4d834150b52ef1420").into(), + slot: 933, + proposer_index: 1, + parent_root: hex!("f5fc63e2780ca302b97aea73fc95d74d702b5afe9a772c2b68f695026337b620").into(), + state_root: hex!("d856d11636bc4d866e78be9e747b222b0977556a367ab42e4085277301438050").into(), + body_root: hex!("5689091ab4eb76c2e876271add4924e1c66ce987c300c24aac2ad8c703e9a33f").into(), }, sync_aggregate: SyncAggregate{ sync_committee_bits: hex!("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), - sync_committee_signature: hex!("9296f9a0387f2cac47008e22ad7c3cd3d49d35384c13e6aa1eacca7dca7c3d2ca81515e50eb3396b9550ed20ef7d8fa2049a186598feb2c00e93728045fcff917733d1827481b8fc95f3913e27fc70112c2490496eb57bb7181f02c3f9fd471f").into(), + sync_committee_signature: hex!("93a3d482fe2a2f7fd2b634169752a8fddf1dc28b23a020b398be8526faf37a74ca0f6db1bed78a9c7256c09a6115235e108e0e8a7ce09287317b0856c4b77dfa5adba6cf4c3ebea5bfa4cd2fcde80fd0a532f2defe65d530201d5d2258796559").into(), }, - signature_slot: 2567, + signature_slot: 934, next_sync_committee_update: None, finalized_header: BeaconHeader { - slot: 2496, - proposer_index: 2, - parent_root: hex!("c99e49787106733eeebab4d93eb326e1f2214575c9d928f0c4ab0da0776f1622").into(), - state_root: hex!("fbf8a08c86ef36bd173e37e733da4a78aa8e85fee99a990e858dd12a59087fde").into(), - body_root: hex!("a2a8ad06901447b2807a9059580a4c40d8a941f325b1343c69f7c7c6c90e4ab0").into(), + slot: 864, + proposer_index: 4, + parent_root: hex!("614e7672f991ac268cd841055973f55e1e42228831a211adef207bb7329be614").into(), + state_root: hex!("5fa8dfca3d760e4242ab46d529144627aa85348a19173b6e081172c701197a4a").into(), + body_root: hex!("0f34c083b1803666bb1ac5e73fa71582731a2cf37d279ff0a3b0cad5a2ff371e").into(), }, finality_branch: vec![ - hex!("4e00000000000000000000000000000000000000000000000000000000000000").into(), + hex!("1b00000000000000000000000000000000000000000000000000000000000000").into(), hex!("10c726fac935bf9657cc7476d3cfa7bedec5983dcfb59e8a7df6d0a619e108d7").into(), hex!("98e9116c6bb7f20de18800dc63e73e689d06d6a47d35b5e2b32cf093d475840d").into(), - hex!("958b8e43347f6df6fa5eb3d62d06a862381a6585aa40640dd1c0de11f1cf89c1").into(), - hex!("f107dce04faa86a28fc5d4a618be9cb8d4fc3c23d6c42c3624f3ff4bf6586a03").into(), - hex!("a501cdc02e86969ac3e4d0c5a36f4f049efaa1ab8cb6693f51d130eb52a80f30").into(), + hex!("f12d9aededc72724e417b518fe6f847684f26f81616243dedf8c551cc7d504f5").into(), + hex!("89a85d0907ab3fd6e00ae385f61d456c6191646404ae7b8d23d0e60440cf4d00").into(), + hex!("9fc943b6020eb61d780d78bcc6f6102a81d2c868d58f36e61c6e286a2dc4d8c2").into(), ], - block_roots_root: 
hex!("d160b7687041891b73e54b06fc4e04f82d0fa8fdd76705895e216c6b24709dfe").into(), + block_roots_root: hex!("b9aab9c388c4e4fcd899b71f62c498fc73406e38e8eb14aa440e9affa06f2a10").into(), block_roots_branch: vec![ - hex!("105290e42d98ab6a0ada6e55453cede36c672abf645eeb986b88d7487616e135").into(), - hex!("9da41f274bcdf6122335443d9ce94d07163b48dba3e2f9499ff56f4e48b48b99").into(), - hex!("ecea7e1d3152d8130e83afdfe34b4de4ba2b69a33c9471991096daf454de9cf5").into(), - hex!("b2bf1758e50b2bfff29169fbc70fdb884b2b05bb615dbc53567574da6f4f1ae2").into(), - hex!("cd87069daf70975779126d6af833b7d636c75ca4d5e750ebcad0e76408a5e5bf").into(), + hex!("733422bd810895dab74cbbe07c69dd440cbb51f573181ad4dddac30fcdd0f41f").into(), + hex!("9b9eca73ab01d14549c325ba1b4610bb20bf1f8ec2dbd649f9d8cc7f3cea75fa").into(), + hex!("bcc666ad0ad9f9725cbd682bc95589d35b1b53b2a615f1e6e8dd5e086336becf").into(), + hex!("3069b547a08f703a1715016e926cbd64e71f93f64fb68d98d8c8f1ab745c46e5").into(), + hex!("c2de7e1097239404e17b263cfa0473533cc41e903cb03440d633bc5c27314cb4").into(), ] }) } -pub fn make_execution_header_update() -> Box { - Box::new(ExecutionHeaderUpdate { +pub fn make_execution_proof() -> Box { + Box::new(ExecutionProof { header: BeaconHeader { - slot: 215, - proposer_index: 2, - parent_root: hex!("97518f531a252bb6ca547b21aca9da767943ec99211d3b15c804e34c3a523f45").into(), - state_root: hex!("b088b5a3a8c90d6dc919a695cd7bb0267c6f983ea2e675c559ceb8f46cb90b67").into(), - body_root: hex!("0ba23c8224fdd01531d5ad51486353bd524a0b4c20bca704e26d3210616f829b").into(), + slot: 393, + proposer_index: 4, + parent_root: hex!("6545b47a614a1dd4cad042a0cdbbf5be347e8ffcdc02c6c64540d5153acebeef").into(), + state_root: hex!("b62ac34a8cb82497be9542fe2114410c9f6021855b766015406101a1f3d86434").into(), + body_root: hex!("04005fe231e11a5b7b1580cb73b177ae8b338bedd745497e6bb7122126a806db").into(), }, ancestry_proof: Some(AncestryProof { header_branch: vec![ - hex!("97518f531a252bb6ca547b21aca9da767943ec99211d3b15c804e34c3a523f45").into(), - hex!("5ce0db996bd499c2b4f7a93263d5aafd052f420efb617cce6fdd54e25516aa45").into(), - hex!("84f0e373b66011ce774c7061440c0a50a51cce2b4b335395eee3e563d605597f").into(), - hex!("48f9ccc5f9594142c18c3b5c39a99f0549329c6ab3ba06c9a50030eadca87770").into(), - hex!("f89d6e311e05bc75a6f63ce118bccce254551f1a88d54c3b4f773f81f946bd99").into(), - hex!("2edd6d893c22636675147c07dfcdb541a146e87c3f15b51c388be4868246dc9b").into(), - hex!("d76b7de5f856e3208a91a42c9c398a7f4fab35e667bf916346050ae742514a2d").into(), - hex!("83a2e233e76385953ca41de4c3afe60471a61f0cc1b3846b4a0670e3e563b747").into(), - hex!("e783a5a109c2ad74e4eb53e8f6b11b31266a92a9e16c1fd5873109c5d41b282c").into(), - hex!("d4ea1ef3869ee6a0fd0b19d7d70027d144eecd4f1d32cbf47632a0a9069164b9").into(), - hex!("f8179564b58eb93a850d35e4156a04db651106442ad891c3e85155c1762792f1").into(), - hex!("4cbb1edb48cf1e32fb30db60aaaeaf6190ffe4d0c8dbc96cec307daecb78be12").into(), + hex!("6545b47a614a1dd4cad042a0cdbbf5be347e8ffcdc02c6c64540d5153acebeef").into(), + hex!("fa84cc88ca53a72181599ff4eb07d8b444bce023fe2347c3b4f51004c43439d3").into(), + hex!("cadc8ae211c6f2221c9138e829249adf902419c78eb4727a150baa4d9a02cc9d").into(), + hex!("33a89962df08a35c52bd7e1d887cd71fa7803e68787d05c714036f6edf75947c").into(), + hex!("2c9760fce5c2829ef3f25595a703c21eb22d0186ce223295556ed5da663a82cf").into(), + hex!("e1aa87654db79c8a0ecd6c89726bb662fcb1684badaef5cd5256f479e3c622e1").into(), + hex!("aa70d5f314e4a1fbb9c362f3db79b21bf68b328887248651fbd29fc501d0ca97").into(), + 
hex!("160b6c235b3a1ed4ef5f80b03ee1c76f7bf3f591c92fca9d8663e9221b9f9f0f").into(), + hex!("f68d7dcd6a07a18e9de7b5d2aa1980eb962e11d7dcb584c96e81a7635c8d2535").into(), + hex!("1d5f912dfd6697110dd1ecb5cb8e77952eef57d85deb373572572df62bb157fc").into(), + hex!("ffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b").into(), + hex!("6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220").into(), hex!("b7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f").into(), ], - finalized_block_root: hex!("890a7f23b9ed2160901654be9efc575d6830ca860e2a97866ae3423fb7bd7231").into(), + finalized_block_root: hex!("751414cd97c0624f922b3e80285e9f776b08fa22fd5f87391f2ed7ef571a8d46").into(), }), execution_header: VersionedExecutionPayloadHeader::Deneb(deneb::ExecutionPayloadHeader { - parent_hash: hex!("d82ec63f5c5e6ba61d62f09c188f158e6449b94bdcc31941e68639eec3c4cf7a").into(), + parent_hash: hex!("8092290aa21b7751576440f77edd02a94058429ce50e63a92d620951fb25eda2").into(), fee_recipient: hex!("0000000000000000000000000000000000000000").into(), - state_root: hex!("8b65545fe5f3216b47b6339b9c91ca2b7f1032a970b04246d9e9fb4460ee34c3").into(), - receipts_root: hex!("7b1f61b9714c080ef0be014e01657a15f45f0304b477beebc7ca5596c8033095").into(), - logs_bloom: hex!("00000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000000000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000040004000000000000002000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000200000000000010").into(), - prev_randao: hex!("6d9e2a012d82b1b6cb0a2c1c1ed24cc16dbb56e6e39ae545371e0666ab057862").into(), - block_number: 215, - gas_limit: 64842908, - gas_used: 119301, - timestamp: 1705859527, - extra_data: hex!("d983010d0a846765746888676f312e32312e358664617277696e").into(), + state_root: hex!("96a83e9ddf745346fafcb0b03d57314623df669ed543c110662b21302a0fae8b").into(), + receipts_root: hex!("dccdfceea05036f7b61dcdabadc937945d31e68a8d3dfd4dc85684457988c284").into(), + logs_bloom: hex!("00000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000400000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000080000000000000000000000000000040004000000000000002002002000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000080000000000000000000000000000000000100000000000000000200000200000010").into(), + prev_randao: hex!("62e309d4f5119d1f5c783abc20fc1a549efbab546d8d0b25ff1cfd58be524e67").into(), + block_number: 393, + gas_limit: 54492273, + gas_used: 199644, + timestamp: 1710552813, + extra_data: hex!("d983010d0b846765746888676f312e32312e368664617277696e").into(), base_fee_per_gas: U256::from(7u64), - block_hash: hex!("48498dbfbcfae53a7f4c289ee00747aceea925f6260c50ead5a33e1c55c40f98").into(), - transactions_root: hex!("5ebc1347fe3df0611d4f66b19bd8e1c6f4eaed0371d850f14c83b1c77ea234e6").into(), + block_hash: hex!("6a9810efb9581d30c1a5c9074f27c68ea779a8c1ae31c213241df16225f4e131").into(), + transactions_root: hex!("2cfa6ed7327e8807c7973516c5c32a68ef2459e586e8067e113d081c3bd8c07d").into(), withdrawals_root: 
hex!("792930bbd5baac43bcc798ee49aa8185ef76bb3b44ba62b91d86ae569e4bb535").into(), blob_gas_used: 0, excess_blob_gas: 0, }), execution_branch: vec![ - hex!("f8c69d3830406d668619bcccc13c8dddde41e863326f7418b241d5924c4ad34a").into(), + hex!("a6833fa629f3286b6916c6e50b8bf089fc9126bee6f64d0413b4e59c1265834d").into(), hex!("b46f0c01805fe212e15907981b757e6c496b0cb06664224655613dcec82505bb").into(), hex!("db56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71").into(), - hex!("f4d6b5cf9c6e212615c3674fa625d04eb1114153fb221ef5ad02aa433fc67cfb").into(), + hex!("d3af7c05c516726be7505239e0b9c7cb53d24abce6b91cdb3b3995f0164a75da").into(), ], }) } + +pub fn make_inbound_fixture() -> InboundQueueFixture { + InboundQueueFixture { + message: Message { + event_log: Log { + address: hex!("eda338e4dc46038493b885327842fd3e301cab39").into(), + topics: vec![ + hex!("7153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84f").into(), + hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539").into(), + hex!("5f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0").into(), + ], + data: hex!("00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000").into(), + }, + proof: Proof { + receipt_proof: (vec![ + hex!("dccdfceea05036f7b61dcdabadc937945d31e68a8d3dfd4dc85684457988c284").to_vec(), + hex!("4a98e45a319168b0fc6005ce6b744ee9bf54338e2c0784b976a8578d241ced0f").to_vec(), + ], vec![ + hex!("f851a09c01dd6d2d8de951c45af23d3ad00829ce021c04d6c8acbe1612d456ee320d4980808080808080a04a98e45a319168b0fc6005ce6b744ee9bf54338e2c0784b976a8578d241ced0f8080808080808080").to_vec(), + hex!("f9028c30b9028802f90284018301d205b9010000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000000000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000040004000000000000002000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000200000000000010f90179f85894eda338e4dc46038493b885327842fd3e301cab39e1a0f78bb28d4b1d7da699e5c0bc2be29c2b04b5aab6aacf6298fe5304f9db9c6d7ea000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7df9011c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a05f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0b8a000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000").to_vec(), + ]), + execution_proof: ExecutionProof { + header: BeaconHeader { + slot: 393, + proposer_index: 4, + parent_root: hex!("6545b47a614a1dd4cad042a0cdbbf5be347e8ffcdc02c6c64540d5153acebeef").into(), + state_root: hex!("b62ac34a8cb82497be9542fe2114410c9f6021855b766015406101a1f3d86434").into(), + body_root: 
hex!("04005fe231e11a5b7b1580cb73b177ae8b338bedd745497e6bb7122126a806db").into(), + }, + ancestry_proof: Some(AncestryProof { + header_branch: vec![ + hex!("6545b47a614a1dd4cad042a0cdbbf5be347e8ffcdc02c6c64540d5153acebeef").into(), + hex!("fa84cc88ca53a72181599ff4eb07d8b444bce023fe2347c3b4f51004c43439d3").into(), + hex!("cadc8ae211c6f2221c9138e829249adf902419c78eb4727a150baa4d9a02cc9d").into(), + hex!("33a89962df08a35c52bd7e1d887cd71fa7803e68787d05c714036f6edf75947c").into(), + hex!("2c9760fce5c2829ef3f25595a703c21eb22d0186ce223295556ed5da663a82cf").into(), + hex!("e1aa87654db79c8a0ecd6c89726bb662fcb1684badaef5cd5256f479e3c622e1").into(), + hex!("aa70d5f314e4a1fbb9c362f3db79b21bf68b328887248651fbd29fc501d0ca97").into(), + hex!("160b6c235b3a1ed4ef5f80b03ee1c76f7bf3f591c92fca9d8663e9221b9f9f0f").into(), + hex!("f68d7dcd6a07a18e9de7b5d2aa1980eb962e11d7dcb584c96e81a7635c8d2535").into(), + hex!("1d5f912dfd6697110dd1ecb5cb8e77952eef57d85deb373572572df62bb157fc").into(), + hex!("ffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b").into(), + hex!("6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220").into(), + hex!("b7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f").into(), + ], + finalized_block_root: hex!("751414cd97c0624f922b3e80285e9f776b08fa22fd5f87391f2ed7ef571a8d46").into(), + }), + execution_header: VersionedExecutionPayloadHeader::Deneb(deneb::ExecutionPayloadHeader { + parent_hash: hex!("8092290aa21b7751576440f77edd02a94058429ce50e63a92d620951fb25eda2").into(), + fee_recipient: hex!("0000000000000000000000000000000000000000").into(), + state_root: hex!("96a83e9ddf745346fafcb0b03d57314623df669ed543c110662b21302a0fae8b").into(), + receipts_root: hex!("dccdfceea05036f7b61dcdabadc937945d31e68a8d3dfd4dc85684457988c284").into(), + logs_bloom: hex!("00000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000400000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000080000000000000000000000000000040004000000000000002002002000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000080000000000000000000000000000000000100000000000000000200000200000010").into(), + prev_randao: hex!("62e309d4f5119d1f5c783abc20fc1a549efbab546d8d0b25ff1cfd58be524e67").into(), + block_number: 393, + gas_limit: 54492273, + gas_used: 199644, + timestamp: 1710552813, + extra_data: hex!("d983010d0b846765746888676f312e32312e368664617277696e").into(), + base_fee_per_gas: U256::from(7u64), + block_hash: hex!("6a9810efb9581d30c1a5c9074f27c68ea779a8c1ae31c213241df16225f4e131").into(), + transactions_root: hex!("2cfa6ed7327e8807c7973516c5c32a68ef2459e586e8067e113d081c3bd8c07d").into(), + withdrawals_root: hex!("792930bbd5baac43bcc798ee49aa8185ef76bb3b44ba62b91d86ae569e4bb535").into(), + blob_gas_used: 0, + excess_blob_gas: 0, + }), + execution_branch: vec![ + hex!("a6833fa629f3286b6916c6e50b8bf089fc9126bee6f64d0413b4e59c1265834d").into(), + hex!("b46f0c01805fe212e15907981b757e6c496b0cb06664224655613dcec82505bb").into(), + hex!("db56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71").into(), + hex!("d3af7c05c516726be7505239e0b9c7cb53d24abce6b91cdb3b3995f0164a75da").into(), + ], + } + }, + }, + finalized_header: BeaconHeader { + slot: 864, + proposer_index: 4, + parent_root: hex!("614e7672f991ac268cd841055973f55e1e42228831a211adef207bb7329be614").into(), + state_root: 
hex!("5fa8dfca3d760e4242ab46d529144627aa85348a19173b6e081172c701197a4a").into(), + body_root: hex!("0f34c083b1803666bb1ac5e73fa71582731a2cf37d279ff0a3b0cad5a2ff371e").into(), + }, + block_roots_root: hex!("b9aab9c388c4e4fcd899b71f62c498fc73406e38e8eb14aa440e9affa06f2a10").into(), + } +} diff --git a/bridges/snowbridge/pallets/ethereum-client/src/benchmarking/mod.rs b/bridges/snowbridge/pallets/ethereum-client/src/benchmarking/mod.rs index e1520cd7153..4b8796b628d 100644 --- a/bridges/snowbridge/pallets/ethereum-client/src/benchmarking/mod.rs +++ b/bridges/snowbridge/pallets/ethereum-client/src/benchmarking/mod.rs @@ -65,24 +65,6 @@ mod benchmarks { Ok(()) } - #[benchmark] - fn submit_execution_header() -> Result<(), BenchmarkError> { - let caller: T::AccountId = whitelisted_caller(); - let checkpoint_update = make_checkpoint(); - let finalized_header_update = make_finalized_header_update(); - let execution_header_update = make_execution_header_update(); - let execution_header_hash = execution_header_update.execution_header.block_hash(); - EthereumBeaconClient::::process_checkpoint_update(&checkpoint_update)?; - EthereumBeaconClient::::process_update(&finalized_header_update)?; - - #[extrinsic_call] - _(RawOrigin::Signed(caller.clone()), Box::new(*execution_header_update)); - - assert!(>::contains_key(execution_header_hash)); - - Ok(()) - } - #[benchmark(extra)] fn bls_fast_aggregate_verify_pre_aggregated() -> Result<(), BenchmarkError> { EthereumBeaconClient::::process_checkpoint_update(&make_checkpoint())?; diff --git a/bridges/snowbridge/pallets/ethereum-client/src/impls.rs b/bridges/snowbridge/pallets/ethereum-client/src/impls.rs index 300431d8770..f600b1f67e2 100644 --- a/bridges/snowbridge/pallets/ethereum-client/src/impls.rs +++ b/bridges/snowbridge/pallets/ethereum-client/src/impls.rs @@ -1,6 +1,8 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: 2023 Snowfork use super::*; +use frame_support::ensure; +use primitives::ExecutionProof; use snowbridge_core::inbound::{ VerificationError::{self, *}, @@ -14,32 +16,13 @@ impl Verifier for Pallet { /// the log should be in the beacon client storage, meaning it has been verified and is an /// ancestor of a finalized beacon block. 
 	fn verify(event_log: &Log, proof: &Proof) -> Result<(), VerificationError> {
-		log::info!(
-			target: "ethereum-client",
-			"💫 Verifying message with block hash {}",
-			proof.block_hash,
-		);
+		Self::verify_execution_proof(&proof.execution_proof)
+			.map_err(|e| InvalidExecutionProof(e.into()))?;
 
-		let header = <ExecutionHeaders<T>>::get(proof.block_hash).ok_or(HeaderNotFound)?;
-
-		let receipt = match Self::verify_receipt_inclusion(header.receipts_root, proof) {
-			Ok(receipt) => receipt,
-			Err(err) => {
-				log::error!(
-					target: "ethereum-client",
-					"💫 Verification of receipt inclusion failed for block {}: {:?}",
-					proof.block_hash,
-					err
-				);
-				return Err(err)
-			},
-		};
-
-		log::trace!(
-			target: "ethereum-client",
-			"💫 Verified receipt inclusion for transaction at index {} in block {}",
-			proof.tx_index, proof.block_hash,
-		);
+		let receipt = Self::verify_receipt_inclusion(
+			proof.execution_proof.execution_header.receipts_root(),
+			&proof.receipt_proof.1,
+		)?;
 
 		event_log.validate().map_err(|_| InvalidLog)?;
@@ -53,18 +36,11 @@ impl<T: Config> Verifier for Pallet<T> {
 		if !receipt.contains_log(&event_log) {
 			log::error!(
 				target: "ethereum-client",
-				"💫 Event log not found in receipt for transaction at index {} in block {}",
-				proof.tx_index, proof.block_hash,
+				"💫 Event log not found in receipt for transaction",
 			);
 			return Err(LogNotFound)
 		}
 
-		log::info!(
-			target: "ethereum-client",
-			"💫 Receipt verification successful for {}",
-			proof.block_hash,
-		);
-
 		Ok(())
 	}
 }
@@ -74,9 +50,9 @@ impl<T: Config> Pallet<T> {
 	/// `proof.block_hash`.
 	pub fn verify_receipt_inclusion(
 		receipts_root: H256,
-		proof: &Proof,
+		receipt_proof: &[Vec<u8>],
 	) -> Result<Receipt, VerificationError> {
-		let result = verify_receipt_proof(receipts_root, &proof.data.1).ok_or(InvalidProof)?;
+		let result = verify_receipt_proof(receipts_root, receipt_proof).ok_or(InvalidProof)?;
 
 		match result {
 			Ok(receipt) => Ok(receipt),
@@ -90,4 +66,96 @@ impl<T: Config> Pallet<T> {
 			},
 		}
 	}
+
+	/// Validates an execution header with ancestry_proof against a finalized checkpoint on
+	/// chain. The beacon header containing the execution header is sent, plus the execution
+	/// header, along with a proof that the execution header is rooted in the beacon header body.
+	pub(crate) fn verify_execution_proof(execution_proof: &ExecutionProof) -> DispatchResult {
+		let latest_finalized_state =
+			FinalizedBeaconState::<T>::get(LatestFinalizedBlockRoot::<T>::get())
+				.ok_or(Error::<T>::NotBootstrapped)?;
+		// Checks that the header is an ancestor of a finalized header, using slot number.
+		ensure!(
+			execution_proof.header.slot <= latest_finalized_state.slot,
+			Error::<T>::HeaderNotFinalized
+		);
+
+		// Gets the hash tree root of the execution header, in preparation for the execution
+		// header proof (used to check that the execution header is rooted in the beacon
+		// header body).
+		let execution_header_root: H256 = execution_proof
+			.execution_header
+			.hash_tree_root()
+			.map_err(|_| Error::<T>::BlockBodyHashTreeRootFailed)?;
+
+		ensure!(
+			verify_merkle_branch(
+				execution_header_root,
+				&execution_proof.execution_branch,
+				config::EXECUTION_HEADER_SUBTREE_INDEX,
+				config::EXECUTION_HEADER_DEPTH,
+				execution_proof.header.body_root
+			),
+			Error::<T>::InvalidExecutionHeaderProof
+		);
+
+		let beacon_block_root: H256 = execution_proof
+			.header
+			.hash_tree_root()
+			.map_err(|_| Error::<T>::HeaderHashTreeRootFailed)?;
+
+		match &execution_proof.ancestry_proof {
+			Some(proof) => {
+				Self::verify_ancestry_proof(
+					beacon_block_root,
+					execution_proof.header.slot,
+					&proof.header_branch,
+					proof.finalized_block_root,
+				)?;
+			},
+			None => {
+				// If the ancestry proof is not provided, we expect this beacon header to be a
+				// finalized beacon header. We need to check that the header hash matches the
+				// finalized header root at the expected slot.
+				let state = <FinalizedBeaconState<T>>::get(beacon_block_root)
+					.ok_or(Error::<T>::ExpectedFinalizedHeaderNotStored)?;
+				if execution_proof.header.slot != state.slot {
+					return Err(Error::<T>::ExpectedFinalizedHeaderNotStored.into())
+				}
+			},
+		}
+
+		Ok(())
+	}
+
+	/// Verify that `block_root` is an ancestor of `finalized_block_root`. Used to prove that
+	/// an execution header is an ancestor of a finalized header (i.e. the blocks are
+	/// on the same chain).
+	fn verify_ancestry_proof(
+		block_root: H256,
+		block_slot: u64,
+		block_root_proof: &[H256],
+		finalized_block_root: H256,
+	) -> DispatchResult {
+		let state = <FinalizedBeaconState<T>>::get(finalized_block_root)
+			.ok_or(Error::<T>::ExpectedFinalizedHeaderNotStored)?;
+
+		ensure!(block_slot < state.slot, Error::<T>::HeaderNotFinalized);
+
+		let index_in_array = block_slot % (SLOTS_PER_HISTORICAL_ROOT as u64);
+		let leaf_index = (SLOTS_PER_HISTORICAL_ROOT as u64) + index_in_array;
+
+		ensure!(
+			verify_merkle_branch(
+				block_root,
+				block_root_proof,
+				leaf_index as usize,
+				config::BLOCK_ROOT_AT_INDEX_DEPTH,
+				state.block_roots_root
+			),
+			Error::<T>::InvalidAncestryMerkleProof
+		);
+
+		Ok(())
+	}
 }
diff --git a/bridges/snowbridge/pallets/ethereum-client/src/lib.rs b/bridges/snowbridge/pallets/ethereum-client/src/lib.rs
index f57f5199020..c1b9e19729b 100644
--- a/bridges/snowbridge/pallets/ethereum-client/src/lib.rs
+++ b/bridges/snowbridge/pallets/ethereum-client/src/lib.rs
@@ -15,8 +15,6 @@
 //! ## Consensus Updates
 //!
 //! * [`Call::submit`]: Submit a finalized beacon header with an optional sync committee update
-//! * [`Call::submit_execution_header`]: Submit an execution header together with an ancestry proof
-//! that can be verified against an already imported finalized beacon header.
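// A worked example of the indexing inside `verify_ancestry_proof` above,
// assuming the pallet is built with the mainnet preset
// SLOTS_PER_HISTORICAL_ROOT = 8192 (slot 393 is taken from the fixtures in
// this diff; the helper is hypothetical and only illustrates the arithmetic):
//
// fn ancestry_leaf_index(block_slot: u64) -> usize {
//     const SLOTS_PER_HISTORICAL_ROOT: u64 = 8192;
//     let index_in_array = block_slot % SLOTS_PER_HISTORICAL_ROOT; // 393 % 8192 = 393
//     (SLOTS_PER_HISTORICAL_ROOT + index_in_array) as usize       // 8192 + 393 = 8585
// }
//
// The offset by SLOTS_PER_HISTORICAL_ROOT is the generalized index of the
// first leaf in the merkleized `block_roots` vector, so the merkle branch is
// checked against the leaf holding slot 393's block root under the finalized
// state's `block_roots_root`.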
#![cfg_attr(not(feature = "std"), no_std)] pub mod config; @@ -40,8 +38,7 @@ use frame_support::{ use frame_system::ensure_signed; use primitives::{ fast_aggregate_verify, verify_merkle_branch, verify_receipt_proof, BeaconHeader, BlsError, - CompactBeaconState, CompactExecutionHeader, ExecutionHeaderState, ForkData, ForkVersion, - ForkVersions, PublicKeyPrepared, SigningData, + CompactBeaconState, ForkData, ForkVersion, ForkVersions, PublicKeyPrepared, SigningData, }; use snowbridge_core::{BasicOperatingMode, RingBufferMap}; use sp_core::H256; @@ -51,11 +48,7 @@ pub use weights::WeightInfo; use functions::{ compute_epoch, compute_period, decompress_sync_committee_bits, sync_committee_sum, }; -pub use types::ExecutionHeaderBuffer; -use types::{ - CheckpointUpdate, ExecutionHeaderUpdate, FinalizedBeaconStateBuffer, SyncCommitteePrepared, - Update, -}; +use types::{CheckpointUpdate, FinalizedBeaconStateBuffer, SyncCommitteePrepared, Update}; pub use pallet::*; @@ -76,10 +69,7 @@ pub mod pallet { pub struct MaxFinalizedHeadersToKeep(PhantomData); impl Get for MaxFinalizedHeadersToKeep { fn get() -> u32 { - // Consider max latency allowed between LatestFinalizedState and LatestExecutionState is - // the total slots in one sync_committee_period so 1 should be fine we keep 2 periods - // here for redundancy. - const MAX_REDUNDANCY: u32 = 2; + const MAX_REDUNDANCY: u32 = 20; config::EPOCHS_PER_SYNC_COMMITTEE_PERIOD as u32 * MAX_REDUNDANCY } } @@ -92,9 +82,6 @@ pub mod pallet { type RuntimeEvent: From> + IsType<::RuntimeEvent>; #[pallet::constant] type ForkVersions: Get; - /// Maximum number of execution headers to keep - #[pallet::constant] - type MaxExecutionHeadersToKeep: Get; type WeightInfo: WeightInfo; } @@ -105,10 +92,6 @@ pub mod pallet { block_hash: H256, slot: u64, }, - ExecutionHeaderImported { - block_hash: H256, - block_number: u64, - }, SyncCommitteeUpdated { period: u64, }, @@ -191,25 +174,6 @@ pub mod pallet { pub(super) type NextSyncCommittee = StorageValue<_, SyncCommitteePrepared, ValueQuery>; - /// Latest imported execution header - #[pallet::storage] - #[pallet::getter(fn latest_execution_state)] - pub(super) type LatestExecutionState = - StorageValue<_, ExecutionHeaderState, ValueQuery>; - - /// Execution Headers - #[pallet::storage] - pub type ExecutionHeaders = - StorageMap<_, Identity, H256, CompactExecutionHeader, OptionQuery>; - - /// Execution Headers: Current position in ring buffer - #[pallet::storage] - pub type ExecutionHeaderIndex = StorageValue<_, u32, ValueQuery>; - - /// Execution Headers: Mapping of ring buffer index to a pruning candidate - #[pallet::storage] - pub type ExecutionHeaderMapping = StorageMap<_, Identity, u32, H256, ValueQuery>; - /// The current operating mode of the pallet. #[pallet::storage] #[pallet::getter(fn operating_mode)] @@ -248,21 +212,6 @@ pub mod pallet { Ok(()) } - #[pallet::call_index(2)] - #[pallet::weight(T::WeightInfo::submit_execution_header())] - #[transactional] - /// Submits a new execution header update. The relevant related beacon header - /// is also included to prove the execution header, as well as ancestry proof data. - pub fn submit_execution_header( - origin: OriginFor, - update: Box, - ) -> DispatchResult { - ensure_signed(origin)?; - ensure!(!Self::operating_mode().is_halted(), Error::::Halted); - Self::process_execution_header_update(&update)?; - Ok(()) - } - /// Halt or resume all pallet operations. May only be called by root. 
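// Rough sizing for the `MaxFinalizedHeadersToKeep` bump above (MAX_REDUNDANCY
// is now 20): assuming the mainnet preset EPOCHS_PER_SYNC_COMMITTEE_PERIOD =
// 256, the finalized-state ring buffer retains up to 256 * 20 = 5120 entries.
// Since execution proofs are now verified against stored finalized beacon
// states rather than pre-imported execution headers, the deeper buffer widens
// the window of ancestry targets that `verify_ancestry_proof` can resolve.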
#[pallet::call_index(3)] #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] @@ -325,41 +274,19 @@ pub mod pallet { >::set(sync_committee_prepared); >::kill(); InitialCheckpointRoot::::set(header_root); - >::kill(); Self::store_validators_root(update.validators_root); - Self::store_finalized_header(header_root, update.header, update.block_roots_root)?; + Self::store_finalized_header(update.header, update.block_roots_root)?; Ok(()) } pub(crate) fn process_update(update: &Update) -> DispatchResult { - Self::cross_check_execution_state()?; Self::verify_update(update)?; Self::apply_update(update)?; Ok(()) } - /// Cross check to make sure that execution header import does not fall too far behind - /// finalised beacon header import. If that happens just return an error and pause - /// processing until execution header processing has caught up. - pub(crate) fn cross_check_execution_state() -> DispatchResult { - let latest_finalized_state = - FinalizedBeaconState::::get(LatestFinalizedBlockRoot::::get()) - .ok_or(Error::::NotBootstrapped)?; - let latest_execution_state = Self::latest_execution_state(); - // The execution header import should be at least within the slot range of a sync - // committee period. - let max_latency = config::EPOCHS_PER_SYNC_COMMITTEE_PERIOD * config::SLOTS_PER_EPOCH; - ensure!( - latest_execution_state.beacon_slot == 0 || - latest_finalized_state.slot < - latest_execution_state.beacon_slot + max_latency as u64, - Error::::ExecutionHeaderTooFarBehind - ); - Ok(()) - } - /// References and strictly follows /// Verifies that provided next sync committee is valid through a series of checks /// (including checking that a sync committee period isn't skipped and that the header is @@ -534,130 +461,12 @@ pub mod pallet { }; if update.finalized_header.slot > latest_finalized_state.slot { - let finalized_block_root: H256 = update - .finalized_header - .hash_tree_root() - .map_err(|_| Error::::HeaderHashTreeRootFailed)?; - Self::store_finalized_header( - finalized_block_root, - update.finalized_header, - update.block_roots_root, - )?; + Self::store_finalized_header(update.finalized_header, update.block_roots_root)?; } Ok(()) } - /// Validates an execution header for import. The beacon header containing the execution - /// header is sent, plus the execution header, along with a proof that the execution header - /// is rooted in the beacon header body. - pub(crate) fn process_execution_header_update( - update: &ExecutionHeaderUpdate, - ) -> DispatchResult { - let latest_finalized_state = - FinalizedBeaconState::::get(LatestFinalizedBlockRoot::::get()) - .ok_or(Error::::NotBootstrapped)?; - // Checks that the header is an ancestor of a finalized header, using slot number. - ensure!( - update.header.slot <= latest_finalized_state.slot, - Error::::HeaderNotFinalized - ); - - // Checks that we don't skip execution headers, they need to be imported sequentially. - let latest_execution_state: ExecutionHeaderState = Self::latest_execution_state(); - ensure!( - latest_execution_state.block_number == 0 || - update.execution_header.block_number() == - latest_execution_state.block_number + 1, - Error::::ExecutionHeaderSkippedBlock - ); - - // Gets the hash tree root of the execution header, in preparation for the execution - // header proof (used to check that the execution header is rooted in the beacon - // header body. 
- let execution_header_root: H256 = update - .execution_header - .hash_tree_root() - .map_err(|_| Error::::BlockBodyHashTreeRootFailed)?; - - ensure!( - verify_merkle_branch( - execution_header_root, - &update.execution_branch, - config::EXECUTION_HEADER_SUBTREE_INDEX, - config::EXECUTION_HEADER_DEPTH, - update.header.body_root - ), - Error::::InvalidExecutionHeaderProof - ); - - let block_root: H256 = update - .header - .hash_tree_root() - .map_err(|_| Error::::HeaderHashTreeRootFailed)?; - - match &update.ancestry_proof { - Some(proof) => { - Self::verify_ancestry_proof( - block_root, - update.header.slot, - &proof.header_branch, - proof.finalized_block_root, - )?; - }, - None => { - // If the ancestry proof is not provided, we expect this header to be a - // finalized header. We need to check that the header hash matches the finalized - // header root at the expected slot. - let state = >::get(block_root) - .ok_or(Error::::ExpectedFinalizedHeaderNotStored)?; - if update.header.slot != state.slot { - return Err(Error::::ExpectedFinalizedHeaderNotStored.into()) - } - }, - } - - Self::store_execution_header( - update.execution_header.block_hash(), - update.execution_header.clone().into(), - update.header.slot, - block_root, - ); - - Ok(()) - } - - /// Verify that `block_root` is an ancestor of `finalized_block_root` Used to prove that - /// an execution header is an ancestor of a finalized header (i.e. the blocks are - /// on the same chain). - fn verify_ancestry_proof( - block_root: H256, - block_slot: u64, - block_root_proof: &[H256], - finalized_block_root: H256, - ) -> DispatchResult { - let state = >::get(finalized_block_root) - .ok_or(Error::::ExpectedFinalizedHeaderNotStored)?; - - ensure!(block_slot < state.slot, Error::::HeaderNotFinalized); - - let index_in_array = block_slot % (SLOTS_PER_HISTORICAL_ROOT as u64); - let leaf_index = (SLOTS_PER_HISTORICAL_ROOT as u64) + index_in_array; - - ensure!( - verify_merkle_branch( - block_root, - block_root_proof, - leaf_index as usize, - config::BLOCK_ROOT_AT_INDEX_DEPTH, - state.block_roots_root - ), - Error::::InvalidAncestryMerkleProof - ); - - Ok(()) - } - /// Computes the signing root for a given beacon header and domain. The hash tree root /// of the beacon header is computed, and then the combination of the beacon header hash /// and the domain makes up the signing root. @@ -679,13 +488,15 @@ pub mod pallet { /// Stores a compacted (slot and block roots root (hash of the `block_roots` beacon state /// field, used for ancestry proof)) beacon state in a ring buffer map, with the header root /// as map key. - fn store_finalized_header( - header_root: H256, + pub fn store_finalized_header( header: BeaconHeader, block_roots_root: H256, ) -> DispatchResult { let slot = header.slot; + let header_root: H256 = + header.hash_tree_root().map_err(|_| Error::::HeaderHashTreeRootFailed)?; + >::insert( header_root, CompactBeaconState { slot: header.slot, block_roots_root }, @@ -704,36 +515,6 @@ pub mod pallet { Ok(()) } - /// Stores the provided execution header in pallet storage. The header is stored - /// in a ring buffer map, with the block hash as map key. The last imported execution - /// header is also kept in storage, for the relayer to check import progress. 
-		pub fn store_execution_header(
-			block_hash: H256,
-			header: CompactExecutionHeader,
-			beacon_slot: u64,
-			beacon_block_root: H256,
-		) {
-			let block_number = header.block_number;
-
-			<ExecutionHeaderBuffer<T>>::insert(block_hash, header);
-
-			log::trace!(
-				target: LOG_TARGET,
-				"💫 Updated latest execution block at {} to number {}.",
-				block_hash,
-				block_number
-			);
-
-			LatestExecutionState::<T>::mutate(|s| {
-				s.beacon_block_root = beacon_block_root;
-				s.beacon_slot = beacon_slot;
-				s.block_hash = block_hash;
-				s.block_number = block_number;
-			});
-
-			Self::deposit_event(Event::ExecutionHeaderImported { block_hash, block_number });
-		}
-
 		/// Stores the validators root in storage. Validators root is the hash tree root of all the
 		/// validators at genesis and is used to identify the chain that we are on
 		/// (used in conjunction with the fork version).
diff --git a/bridges/snowbridge/pallets/ethereum-client/src/mock.rs b/bridges/snowbridge/pallets/ethereum-client/src/mock.rs
index 799b14f4773..bd6144ebd8f 100644
--- a/bridges/snowbridge/pallets/ethereum-client/src/mock.rs
+++ b/bridges/snowbridge/pallets/ethereum-client/src/mock.rs
@@ -2,12 +2,13 @@
 // SPDX-FileCopyrightText: 2023 Snowfork
 use crate as ethereum_beacon_client;
 use crate::config;
-use frame_support::{derive_impl, parameter_types};
-use hex_literal::hex;
+use frame_support::{derive_impl, dispatch::DispatchResult, parameter_types};
 use pallet_timestamp;
-use primitives::{CompactExecutionHeader, Fork, ForkVersions};
+use primitives::{Fork, ForkVersions};
 use snowbridge_core::inbound::{Log, Proof};
+use sp_std::default::Default;
 use std::{fs::File, path::PathBuf};
+
 type Block = frame_system::mocking::MockBlock<Test>;
 use sp_runtime::BuildStorage;
 
@@ -20,8 +21,8 @@ where
 	serde_json::from_reader(File::open(filepath).unwrap())
 }
 
-pub fn load_execution_header_update_fixture() -> primitives::ExecutionHeaderUpdate {
-	load_fixture("execution-header-update.json".to_string()).unwrap()
+pub fn load_execution_proof_fixture() -> primitives::ExecutionProof {
+	load_fixture("execution-proof.json".to_string()).unwrap()
 }
 
 pub fn load_checkpoint_update_fixture(
@@ -50,41 +51,8 @@ pub fn load_next_finalized_header_update_fixture(
 }
 
 pub fn get_message_verification_payload() -> (Log, Proof) {
-	(
-		Log {
-			address: hex!("ee9170abfbf9421ad6dd07f6bdec9d89f2b581e0").into(),
-			topics: vec![
-				hex!("1b11dcf133cc240f682dab2d3a8e4cd35c5da8c9cf99adac4336f8512584c5ad").into(),
-				hex!("00000000000000000000000000000000000000000000000000000000000003e8").into(),
-				hex!("0000000000000000000000000000000000000000000000000000000000000001").into(),
-			],
-			data: hex!("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004b000f000000000000000100d184c103f7acc340847eee82a0b909e3358bc28d440edffa1352b13227e8ee646f3ea37456dec701345772617070656420457468657210574554481235003511000000000000000000000000000000000000000000").into(),
-		},
-		Proof {
-			block_hash: hex!("05aaa60b0f27cce9e71909508527264b77ee14da7b5bf915fcc4e32715333213").into(),
-			tx_index: 0,
-			data: (vec![
-				hex!("cf0d1c1ba57d1e0edfb59786c7e30c2b7e12bd54612b00cd21c4eaeecedf44fb").to_vec(),
-				hex!("d21fc4f68ab05bc4dcb23c67008e92c4d466437cdd6ed7aad0c008944c185510").to_vec(),
-				hex!("b9890f91ca0d77aa2a4adfaf9b9e40c94cac9e638b6d9797923865872944b646").to_vec(),
-			], vec![
hex!("f90131a0b601337b3aa10a671caa724eba641e759399979856141d3aea6b6b4ac59b889ba00c7d5dd48be9060221a02fb8fa213860b4c50d47046c8fa65ffaba5737d569e0a094601b62a1086cd9c9cb71a7ebff9e718f3217fd6e837efe4246733c0a196f63a06a4b0dd0aefc37b3c77828c8f07d1b7a2455ceb5dbfd3c77d7d6aeeddc2f7e8ca0d6e8e23142cdd8ec219e1f5d8b56aa18e456702b195deeaa210327284d42ade4a08a313d4c87023005d1ab631bbfe3f5de1e405d0e66d0bef3e033f1e5711b5521a0bf09a5d9a48b10ade82b8d6a5362a15921c8b5228a3487479b467db97411d82fa0f95cccae2a7c572ef3c566503e30bac2b2feb2d2f26eebf6d870dcf7f8cf59cea0d21fc4f68ab05bc4dcb23c67008e92c4d466437cdd6ed7aad0c008944c1855108080808080808080").to_vec(), - hex!("f851a0b9890f91ca0d77aa2a4adfaf9b9e40c94cac9e638b6d9797923865872944b646a060a634b9280e3a23fb63375e7bbdd9ab07fd379ab6a67e2312bbc112195fa358808080808080808080808080808080").to_vec(), - hex!("f9030820b9030402f90300018301d6e2b9010000000000000800000000000020040008000000000000000000000000400000008000000000000000000000000000000000000000000000000000000000042010000000001000000000000000000000000000000000040000000000000000000000000000000000000000000000008000000000000000002000000000000000000000000200000000000000200000000000100000000040000001000200008000000000000200000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000f901f5f87a942ffa5ecdbe006d30397c7636d3e015eee251369ff842a0c965575a00553e094ca7c5d14f02e107c258dda06867cbf9e0e69f80e71bbcc1a000000000000000000000000000000000000000000000000000000000000003e8a000000000000000000000000000000000000000000000000000000000000003e8f9011c94ee9170abfbf9421ad6dd07f6bdec9d89f2b581e0f863a01b11dcf133cc240f682dab2d3a8e4cd35c5da8c9cf99adac4336f8512584c5ada000000000000000000000000000000000000000000000000000000000000003e8a00000000000000000000000000000000000000000000000000000000000000001b8a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004b000f000000000000000100d184c103f7acc340847eee82a0b909e3358bc28d440edffa1352b13227e8ee646f3ea37456dec701345772617070656420457468657210574554481235003511000000000000000000000000000000000000000000f858948cf6147918a5cbb672703f879f385036f8793a24e1a01449abf21e49fd025f33495e77f7b1461caefdd3d4bb646424a3f445c4576a5ba0000000000000000000000000440edffa1352b13227e8ee646f3ea37456dec701").to_vec(), - ]), - } - ) -} - -pub fn get_message_verification_header() -> CompactExecutionHeader { - CompactExecutionHeader { - parent_hash: hex!("04a7f6ab8282203562c62f38b0ab41d32aaebe2c7ea687702b463148a6429e04") - .into(), - block_number: 55, - state_root: hex!("894d968712976d613519f973a317cb0781c7b039c89f27ea2b7ca193f7befdb3").into(), - receipts_root: hex!("cf0d1c1ba57d1e0edfb59786c7e30c2b7e12bd54612b00cd21c4eaeecedf44fb") - .into(), - } + let inbound_fixture = snowbridge_pallet_ethereum_client_fixtures::make_inbound_fixture(); + (inbound_fixture.message.event_log, inbound_fixture.message.proof) } frame_support::construct_runtime!( @@ -130,20 +98,25 @@ parameter_types! { epoch: 0, } }; - pub const ExecutionHeadersPruneThreshold: u32 = 8192; } impl ethereum_beacon_client::Config for Test { type RuntimeEvent = RuntimeEvent; type ForkVersions = ChainForkVersions; - type MaxExecutionHeadersToKeep = ExecutionHeadersPruneThreshold; type WeightInfo = (); } // Build genesis storage according to the mock runtime. 
pub fn new_tester() -> sp_io::TestExternalities { let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - let mut ext = sp_io::TestExternalities::new(t); - let _ = ext.execute_with(|| Timestamp::set(RuntimeOrigin::signed(1), 30_000)); + let ext = sp_io::TestExternalities::new(t); ext } + +pub fn initialize_storage() -> DispatchResult { + let inbound_fixture = snowbridge_pallet_ethereum_client_fixtures::make_inbound_fixture(); + EthereumBeaconClient::store_finalized_header( + inbound_fixture.finalized_header, + inbound_fixture.block_roots_root, + ) +} diff --git a/bridges/snowbridge/pallets/ethereum-client/src/tests.rs b/bridges/snowbridge/pallets/ethereum-client/src/tests.rs index 20a184490fd..765958c1282 100644 --- a/bridges/snowbridge/pallets/ethereum-client/src/tests.rs +++ b/bridges/snowbridge/pallets/ethereum-client/src/tests.rs @@ -1,14 +1,13 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: 2023 Snowfork use crate::{ - functions::compute_period, pallet::ExecutionHeaders, sync_committee_sum, verify_merkle_branch, - BeaconHeader, CompactBeaconState, Error, ExecutionHeaderBuffer, FinalizedBeaconState, - LatestExecutionState, LatestFinalizedBlockRoot, NextSyncCommittee, SyncCommitteePrepared, + functions::compute_period, sync_committee_sum, verify_merkle_branch, BeaconHeader, + CompactBeaconState, Error, FinalizedBeaconState, LatestFinalizedBlockRoot, NextSyncCommittee, + SyncCommitteePrepared, }; use crate::mock::{ - get_message_verification_header, get_message_verification_payload, - load_checkpoint_update_fixture, load_execution_header_update_fixture, + get_message_verification_payload, load_checkpoint_update_fixture, load_finalized_header_update_fixture, load_next_finalized_header_update_fixture, load_next_sync_committee_update_fixture, load_sync_committee_update_fixture, }; @@ -19,14 +18,9 @@ use crate::config::{EPOCHS_PER_SYNC_COMMITTEE_PERIOD, SLOTS_PER_EPOCH, SLOTS_PER use frame_support::{assert_err, assert_noop, assert_ok}; use hex_literal::hex; use primitives::{ - CompactExecutionHeader, ExecutionHeaderState, Fork, ForkVersions, NextSyncCommitteeUpdate, - VersionedExecutionPayloadHeader, -}; -use rand::{thread_rng, Rng}; -use snowbridge_core::{ - inbound::{VerificationError, Verifier}, - RingBufferMap, + types::deneb, Fork, ForkVersions, NextSyncCommitteeUpdate, VersionedExecutionPayloadHeader, }; +use snowbridge_core::inbound::{VerificationError, Verifier}; use sp_core::H256; use sp_runtime::DispatchError; @@ -212,61 +206,6 @@ pub fn sync_committee_participation_is_supermajority_errors_when_not_supermajori }); } -#[test] -pub fn execution_header_pruning() { - new_tester().execute_with(|| { - let execution_header_prune_threshold = ExecutionHeadersPruneThreshold::get(); - let to_be_deleted = execution_header_prune_threshold / 2; - - let mut stored_hashes = vec![]; - - for i in 0..execution_header_prune_threshold { - let mut hash = H256::default(); - thread_rng().try_fill(&mut hash.0[..]).unwrap(); - EthereumBeaconClient::store_execution_header( - hash, - CompactExecutionHeader::default(), - i as u64, - hash, - ); - stored_hashes.push(hash); - } - - // We should have stored everything until now - assert_eq!({ ExecutionHeaders::::iter().count() }, stored_hashes.len()); - - // Let's push extra entries so that some of the previous entries are deleted. 
- for i in 0..to_be_deleted { - let mut hash = H256::default(); - thread_rng().try_fill(&mut hash.0[..]).unwrap(); - EthereumBeaconClient::store_execution_header( - hash, - CompactExecutionHeader::default(), - (i + execution_header_prune_threshold) as u64, - hash, - ); - - stored_hashes.push(hash); - } - - // We should have only stored up to `execution_header_prune_threshold` - assert_eq!( - ExecutionHeaders::::iter().count() as u32, - execution_header_prune_threshold - ); - - // First `to_be_deleted` items must be deleted - for i in 0..to_be_deleted { - assert!(!ExecutionHeaders::::contains_key(stored_hashes[i as usize])); - } - - // Other entries should be part of data - for i in to_be_deleted..(to_be_deleted + execution_header_prune_threshold) { - assert!(ExecutionHeaders::::contains_key(stored_hashes[i as usize])); - } - }); -} - #[test] fn compute_fork_version() { let mock_fork_versions = ForkVersions { @@ -348,34 +287,6 @@ fn find_present_keys() { }); } -#[test] -fn cross_check_execution_state() { - new_tester().execute_with(|| { - let header_root: H256 = TEST_HASH.into(); - >::insert( - header_root, - CompactBeaconState { - // set slot to period 5 - slot: ((EPOCHS_PER_SYNC_COMMITTEE_PERIOD * SLOTS_PER_EPOCH) * 5) as u64, - block_roots_root: Default::default(), - }, - ); - LatestFinalizedBlockRoot::::set(header_root); - >::set(ExecutionHeaderState { - beacon_block_root: Default::default(), - // set slot to period 2 - beacon_slot: ((EPOCHS_PER_SYNC_COMMITTEE_PERIOD * SLOTS_PER_EPOCH) * 2) as u64, - block_hash: Default::default(), - block_number: 0, - }); - - assert_err!( - EthereumBeaconClient::cross_check_execution_state(), - Error::::ExecutionHeaderTooFarBehind - ); - }); -} - /* SYNC PROCESS TESTS */ #[test] @@ -608,40 +519,6 @@ fn submit_update_with_skipped_sync_committee_period() { }); } -#[test] -fn submit_update_execution_headers_too_far_behind() { - let checkpoint = Box::new(load_checkpoint_update_fixture()); - let finalized_header_update = Box::new(load_finalized_header_update_fixture()); - let execution_header_update = Box::new(load_execution_header_update_fixture()); - let next_update = Box::new(load_next_sync_committee_update_fixture()); - - new_tester().execute_with(|| { - let far_ahead_finalized_header_slot = finalized_header_update.finalized_header.slot + - (EPOCHS_PER_SYNC_COMMITTEE_PERIOD * SLOTS_PER_EPOCH * 2) as u64; - assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); - assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); - assert_ok!(EthereumBeaconClient::submit_execution_header( - RuntimeOrigin::signed(1), - execution_header_update - )); - - let header_root: H256 = TEST_HASH.into(); - >::insert( - header_root, - CompactBeaconState { - slot: far_ahead_finalized_header_slot, - block_roots_root: Default::default(), - }, - ); - LatestFinalizedBlockRoot::::set(header_root); - - assert_err!( - EthereumBeaconClient::submit(RuntimeOrigin::signed(1), next_update), - Error::::ExecutionHeaderTooFarBehind - ); - }); -} - #[test] fn submit_irrelevant_update() { let checkpoint = Box::new(load_checkpoint_update_fixture()); @@ -703,187 +580,6 @@ fn submit_update_with_invalid_sync_committee_update() { }); } -#[test] -fn submit_execution_header_update() { - let checkpoint = Box::new(load_checkpoint_update_fixture()); - let finalized_header_update = Box::new(load_finalized_header_update_fixture()); - let execution_header_update = Box::new(load_execution_header_update_fixture()); - - new_tester().execute_with(|| { - 
assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); - assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); - assert_ok!(EthereumBeaconClient::submit_execution_header( - RuntimeOrigin::signed(1), - execution_header_update.clone() - )); - assert!(>::contains_key( - execution_header_update.execution_header.block_hash() - )); - }); -} - -#[test] -fn submit_execution_header_update_invalid_ancestry_proof() { - let checkpoint = Box::new(load_checkpoint_update_fixture()); - let finalized_header_update = Box::new(load_finalized_header_update_fixture()); - let mut execution_header_update = Box::new(load_execution_header_update_fixture()); - if let Some(ref mut ancestry_proof) = execution_header_update.ancestry_proof { - ancestry_proof.header_branch[0] = TEST_HASH.into() - } - - new_tester().execute_with(|| { - assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); - assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); - assert_err!( - EthereumBeaconClient::submit_execution_header( - RuntimeOrigin::signed(1), - execution_header_update - ), - Error::::InvalidAncestryMerkleProof - ); - }); -} - -#[test] -fn submit_execution_header_update_invalid_execution_header_proof() { - let checkpoint = Box::new(load_checkpoint_update_fixture()); - let finalized_header_update = Box::new(load_finalized_header_update_fixture()); - let mut execution_header_update = Box::new(load_execution_header_update_fixture()); - execution_header_update.execution_branch[0] = TEST_HASH.into(); - - new_tester().execute_with(|| { - assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); - assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); - assert_err!( - EthereumBeaconClient::submit_execution_header( - RuntimeOrigin::signed(1), - execution_header_update - ), - Error::::InvalidExecutionHeaderProof - ); - }); -} - -#[test] -fn submit_execution_header_update_that_skips_block() { - let checkpoint = Box::new(load_checkpoint_update_fixture()); - let finalized_header_update = Box::new(load_finalized_header_update_fixture()); - let execution_header_update = Box::new(load_execution_header_update_fixture()); - let mut skipped_block_execution_header_update = - Box::new(load_execution_header_update_fixture()); - let mut skipped_execution_header = - skipped_block_execution_header_update.execution_header.clone(); - - skipped_execution_header = match skipped_execution_header { - VersionedExecutionPayloadHeader::Capella(execution_payload_header) => { - let mut mut_execution_payload_header = execution_payload_header.clone(); - mut_execution_payload_header.block_number = execution_payload_header.block_number + 2; - VersionedExecutionPayloadHeader::Capella(mut_execution_payload_header) - }, - VersionedExecutionPayloadHeader::Deneb(execution_payload_header) => { - let mut mut_execution_payload_header = execution_payload_header.clone(); - mut_execution_payload_header.block_number = execution_payload_header.block_number + 2; - VersionedExecutionPayloadHeader::Deneb(mut_execution_payload_header) - }, - }; - - skipped_block_execution_header_update.execution_header = skipped_execution_header; - - new_tester().execute_with(|| { - assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); - assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); - assert_ok!(EthereumBeaconClient::submit_execution_header( - RuntimeOrigin::signed(1), 
- execution_header_update.clone() - )); - assert!(>::contains_key( - execution_header_update.execution_header.block_hash() - )); - assert_err!( - EthereumBeaconClient::submit_execution_header( - RuntimeOrigin::signed(1), - skipped_block_execution_header_update - ), - Error::::ExecutionHeaderSkippedBlock - ); - }); -} - -#[test] -fn submit_execution_header_update_that_is_also_finalized_header_which_is_not_stored() { - let checkpoint = Box::new(load_checkpoint_update_fixture()); - let finalized_header_update = Box::new(load_finalized_header_update_fixture()); - let mut execution_header_update = Box::new(load_execution_header_update_fixture()); - execution_header_update.ancestry_proof = None; - - new_tester().execute_with(|| { - assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); - assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); - assert_err!( - EthereumBeaconClient::submit_execution_header( - RuntimeOrigin::signed(1), - execution_header_update - ), - Error::::ExpectedFinalizedHeaderNotStored - ); - }); -} - -#[test] -fn submit_execution_header_update_that_is_also_finalized_header_which_is_stored_but_slots_dont_match( -) { - let checkpoint = Box::new(load_checkpoint_update_fixture()); - let finalized_header_update = Box::new(load_finalized_header_update_fixture()); - let mut execution_header_update = Box::new(load_execution_header_update_fixture()); - execution_header_update.ancestry_proof = None; - - new_tester().execute_with(|| { - assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); - assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); - - let block_root: H256 = execution_header_update.header.hash_tree_root().unwrap(); - - >::insert( - block_root, - CompactBeaconState { - slot: execution_header_update.header.slot + 1, - block_roots_root: Default::default(), - }, - ); - LatestFinalizedBlockRoot::::set(block_root); - - assert_err!( - EthereumBeaconClient::submit_execution_header( - RuntimeOrigin::signed(1), - execution_header_update - ), - Error::::ExpectedFinalizedHeaderNotStored - ); - }); -} - -#[test] -fn submit_execution_header_not_finalized() { - let checkpoint = Box::new(load_checkpoint_update_fixture()); - let finalized_header_update = Box::new(load_finalized_header_update_fixture()); - let update = Box::new(load_execution_header_update_fixture()); - - new_tester().execute_with(|| { - assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); - assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); - - >::mutate(>::get(), |x| { - let prev = x.unwrap(); - *x = Some(CompactBeaconState { slot: update.header.slot - 1, ..prev }); - }); - - assert_err!( - EthereumBeaconClient::submit_execution_header(RuntimeOrigin::signed(1), update), - Error::::HeaderNotFinalized - ); - }); -} - /// Check that a gap of more than 8192 slots between finalized headers is not allowed. 
#[test] fn submit_finalized_header_update_with_too_large_gap() { @@ -943,37 +639,21 @@ fn submit_finalized_header_update_with_gap_at_limit() { #[test] fn verify_message() { - let header = get_message_verification_header(); let (event_log, proof) = get_message_verification_payload(); - let block_hash = proof.block_hash; new_tester().execute_with(|| { - >::insert(block_hash, header); + assert_ok!(initialize_storage()); assert_ok!(EthereumBeaconClient::verify(&event_log, &proof)); }); } -#[test] -fn verify_message_missing_header() { - let (event_log, proof) = get_message_verification_payload(); - - new_tester().execute_with(|| { - assert_err!( - EthereumBeaconClient::verify(&event_log, &proof), - VerificationError::HeaderNotFound - ); - }); -} - #[test] fn verify_message_invalid_proof() { - let header = get_message_verification_header(); let (event_log, mut proof) = get_message_verification_payload(); - proof.data.1[0] = TEST_HASH.into(); - let block_hash = proof.block_hash; + proof.receipt_proof.1[0] = TEST_HASH.into(); new_tester().execute_with(|| { - >::insert(block_hash, header); + assert_ok!(initialize_storage()); assert_err!( EthereumBeaconClient::verify(&event_log, &proof), VerificationError::InvalidProof @@ -983,29 +663,28 @@ fn verify_message_invalid_proof() { #[test] fn verify_message_invalid_receipts_root() { - let mut header = get_message_verification_header(); - let (event_log, proof) = get_message_verification_payload(); - let block_hash = proof.block_hash; - header.receipts_root = TEST_HASH.into(); + let (event_log, mut proof) = get_message_verification_payload(); + let mut payload = deneb::ExecutionPayloadHeader::default(); + payload.receipts_root = TEST_HASH.into(); + proof.execution_proof.execution_header = VersionedExecutionPayloadHeader::Deneb(payload); new_tester().execute_with(|| { - >::insert(block_hash, header); + assert_ok!(initialize_storage()); assert_err!( EthereumBeaconClient::verify(&event_log, &proof), - VerificationError::InvalidProof + VerificationError::InvalidExecutionProof( + Error::::BlockBodyHashTreeRootFailed.into() + ) ); }); } #[test] fn verify_message_invalid_log() { - let header = get_message_verification_header(); let (mut event_log, proof) = get_message_verification_payload(); - let block_hash = proof.block_hash; event_log.topics = vec![H256::zero(); 10]; - new_tester().execute_with(|| { - >::insert(block_hash, header); + assert_ok!(initialize_storage()); assert_err!( EthereumBeaconClient::verify(&event_log, &proof), VerificationError::InvalidLog @@ -1015,13 +694,11 @@ fn verify_message_invalid_log() { #[test] fn verify_message_receipt_does_not_contain_log() { - let header = get_message_verification_header(); let (mut event_log, proof) = get_message_verification_payload(); - let block_hash = proof.block_hash; event_log.data = hex!("f9013c94ee9170abfbf9421ad6dd07f6bdec9d89f2b581e0f863a01b11dcf133cc240f682dab2d3a8e4cd35c5da8c9cf99adac4336f8512584c5ada000000000000000000000000000000000000000000000000000000000000003e8a00000000000000000000000000000000000000000000000000000000000000002b8c000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000068000f000000000000000101d184c103f7acc340847eee82a0b909e3358bc28d440edffa1352b13227e8ee646f3ea37456dec70100000101001cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c0000e8890423c78a0000000000000000000000000000000000000000000000000000000000000000").to_vec(); new_tester().execute_with(|| { - >::insert(block_hash, header); + 
assert_ok!(initialize_storage()); assert_err!( EthereumBeaconClient::verify(&event_log, &proof), VerificationError::LogNotFound @@ -1033,7 +710,6 @@ fn verify_message_receipt_does_not_contain_log() { fn set_operating_mode() { let checkpoint = Box::new(load_checkpoint_update_fixture()); let update = Box::new(load_finalized_header_update_fixture()); - let execution_header_update = Box::new(load_execution_header_update_fixture()); new_tester().execute_with(|| { assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); @@ -1047,14 +723,6 @@ fn set_operating_mode() { EthereumBeaconClient::submit(RuntimeOrigin::signed(1), update), Error::::Halted ); - - assert_noop!( - EthereumBeaconClient::submit_execution_header( - RuntimeOrigin::signed(1), - execution_header_update - ), - Error::::Halted - ); }); } @@ -1070,3 +738,107 @@ fn set_operating_mode_root_only() { ); }); } + +#[test] +fn verify_execution_proof_invalid_ancestry_proof() { + let checkpoint = Box::new(load_checkpoint_update_fixture()); + let finalized_header_update = Box::new(load_finalized_header_update_fixture()); + let mut execution_header_update = Box::new(load_execution_proof_fixture()); + if let Some(ref mut ancestry_proof) = execution_header_update.ancestry_proof { + ancestry_proof.header_branch[0] = TEST_HASH.into() + } + + new_tester().execute_with(|| { + assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); + assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); + assert_err!( + EthereumBeaconClient::verify_execution_proof(&execution_header_update), + Error::::InvalidAncestryMerkleProof + ); + }); +} + +#[test] +fn verify_execution_proof_invalid_execution_header_proof() { + let checkpoint = Box::new(load_checkpoint_update_fixture()); + let finalized_header_update = Box::new(load_finalized_header_update_fixture()); + let mut execution_header_update = Box::new(load_execution_proof_fixture()); + execution_header_update.execution_branch[0] = TEST_HASH.into(); + + new_tester().execute_with(|| { + assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); + assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); + assert_err!( + EthereumBeaconClient::verify_execution_proof(&execution_header_update), + Error::::InvalidExecutionHeaderProof + ); + }); +} + +#[test] +fn verify_execution_proof_that_is_also_finalized_header_which_is_not_stored() { + let checkpoint = Box::new(load_checkpoint_update_fixture()); + let finalized_header_update = Box::new(load_finalized_header_update_fixture()); + let mut execution_header_update = Box::new(load_execution_proof_fixture()); + execution_header_update.ancestry_proof = None; + + new_tester().execute_with(|| { + assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); + assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); + assert_err!( + EthereumBeaconClient::verify_execution_proof(&execution_header_update), + Error::::ExpectedFinalizedHeaderNotStored + ); + }); +} + +#[test] +fn submit_execution_proof_that_is_also_finalized_header_which_is_stored_but_slots_dont_match() { + let checkpoint = Box::new(load_checkpoint_update_fixture()); + let finalized_header_update = Box::new(load_finalized_header_update_fixture()); + let mut execution_header_update = Box::new(load_execution_proof_fixture()); + execution_header_update.ancestry_proof = None; + + new_tester().execute_with(|| { + 
+		assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint));
+		assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update));
+
+		let block_root: H256 = execution_header_update.header.hash_tree_root().unwrap();
+
+		<FinalizedBeaconState<Test>>::insert(
+			block_root,
+			CompactBeaconState {
+				slot: execution_header_update.header.slot + 1,
+				block_roots_root: Default::default(),
+			},
+		);
+		LatestFinalizedBlockRoot::<Test>::set(block_root);
+
+		assert_err!(
+			EthereumBeaconClient::verify_execution_proof(&execution_header_update),
+			Error::<Test>::ExpectedFinalizedHeaderNotStored
+		);
+	});
+}
+
+#[test]
+fn verify_execution_proof_not_finalized() {
+	let checkpoint = Box::new(load_checkpoint_update_fixture());
+	let finalized_header_update = Box::new(load_finalized_header_update_fixture());
+	let update = Box::new(load_execution_proof_fixture());
+
+	new_tester().execute_with(|| {
+		assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint));
+		assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update));
+
+		<FinalizedBeaconState<Test>>::mutate(<LatestFinalizedBlockRoot<Test>>::get(), |x| {
+			let prev = x.unwrap();
+			*x = Some(CompactBeaconState { slot: update.header.slot - 1, ..prev });
+		});
+
+		assert_err!(
+			EthereumBeaconClient::verify_execution_proof(&update),
+			Error::<Test>::HeaderNotFinalized
+		);
+	});
+}
diff --git a/bridges/snowbridge/pallets/ethereum-client/src/types.rs b/bridges/snowbridge/pallets/ethereum-client/src/types.rs
index 5dcefea9f80..8808f989754 100644
--- a/bridges/snowbridge/pallets/ethereum-client/src/types.rs
+++ b/bridges/snowbridge/pallets/ethereum-client/src/types.rs
@@ -15,17 +15,7 @@ pub type CheckpointUpdate = primitives::CheckpointUpdate;
 pub type Update = primitives::Update;
 pub type NextSyncCommitteeUpdate = primitives::NextSyncCommitteeUpdate;
 
-pub use primitives::ExecutionHeaderUpdate;
-
-/// ExecutionHeader ring buffer implementation
-pub type ExecutionHeaderBuffer<T: Config> = RingBufferMapImpl<
-	u32,
-	<T as Config>::MaxExecutionHeadersToKeep,
-	crate::ExecutionHeaderIndex<T>,
-	crate::ExecutionHeaderMapping<T>,
-	crate::ExecutionHeaders<T>,
-	OptionQuery,
->;
+pub use primitives::{AncestryProof, ExecutionProof};
 
 /// FinalizedState ring buffer implementation
 pub(crate) type FinalizedBeaconStateBuffer<T: Config> = RingBufferMapImpl<
diff --git a/bridges/snowbridge/pallets/ethereum-client/src/weights.rs b/bridges/snowbridge/pallets/ethereum-client/src/weights.rs
index e1a5578f466..e4629746aa2 100644
--- a/bridges/snowbridge/pallets/ethereum-client/src/weights.rs
+++ b/bridges/snowbridge/pallets/ethereum-client/src/weights.rs
@@ -36,7 +36,6 @@ pub trait WeightInfo {
 	fn force_checkpoint() -> Weight;
 	fn submit() -> Weight;
 	fn submit_with_sync_committee() -> Weight;
-	fn submit_execution_header() -> Weight;
 }
 
 // For backwards compatibility and tests
@@ -59,10 +58,4 @@ impl WeightInfo for () {
 			.saturating_add(RocksDbWeight::get().reads(6))
 			.saturating_add(RocksDbWeight::get().writes(1))
 	}
-	fn submit_execution_header() -> Weight {
-		Weight::from_parts(113_158_000_u64, 0)
-			.saturating_add(Weight::from_parts(0, 3537))
-			.saturating_add(RocksDbWeight::get().reads(5))
-			.saturating_add(RocksDbWeight::get().writes(4))
-	}
 }
diff --git a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/execution-header-update.json b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/execution-header-update.json
deleted file mode 100755
index 319014249c1..00000000000
--- a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/execution-header-update.json
+++ /dev/null
@@ -1,54 +0,0 @@ -{ - "header": { - "slot": 215, - 
"proposer_index": 2, - "parent_root": "0x97518f531a252bb6ca547b21aca9da767943ec99211d3b15c804e34c3a523f45", - "state_root": "0xb088b5a3a8c90d6dc919a695cd7bb0267c6f983ea2e675c559ceb8f46cb90b67", - "body_root": "0x0ba23c8224fdd01531d5ad51486353bd524a0b4c20bca704e26d3210616f829b" - }, - "ancestry_proof": { - "header_branch": [ - "0x97518f531a252bb6ca547b21aca9da767943ec99211d3b15c804e34c3a523f45", - "0x5ce0db996bd499c2b4f7a93263d5aafd052f420efb617cce6fdd54e25516aa45", - "0x84f0e373b66011ce774c7061440c0a50a51cce2b4b335395eee3e563d605597f", - "0x48f9ccc5f9594142c18c3b5c39a99f0549329c6ab3ba06c9a50030eadca87770", - "0xf89d6e311e05bc75a6f63ce118bccce254551f1a88d54c3b4f773f81f946bd99", - "0x2edd6d893c22636675147c07dfcdb541a146e87c3f15b51c388be4868246dc9b", - "0xd76b7de5f856e3208a91a42c9c398a7f4fab35e667bf916346050ae742514a2d", - "0x83a2e233e76385953ca41de4c3afe60471a61f0cc1b3846b4a0670e3e563b747", - "0xe783a5a109c2ad74e4eb53e8f6b11b31266a92a9e16c1fd5873109c5d41b282c", - "0xd4ea1ef3869ee6a0fd0b19d7d70027d144eecd4f1d32cbf47632a0a9069164b9", - "0xf8179564b58eb93a850d35e4156a04db651106442ad891c3e85155c1762792f1", - "0x4cbb1edb48cf1e32fb30db60aaaeaf6190ffe4d0c8dbc96cec307daecb78be12", - "0xb7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f" - ], - "finalized_block_root": "0x890a7f23b9ed2160901654be9efc575d6830ca860e2a97866ae3423fb7bd7231" - }, - "execution_header": { - "Deneb": { - "parent_hash": "0xd82ec63f5c5e6ba61d62f09c188f158e6449b94bdcc31941e68639eec3c4cf7a", - "fee_recipient": "0x0000000000000000000000000000000000000000", - "state_root": "0x8b65545fe5f3216b47b6339b9c91ca2b7f1032a970b04246d9e9fb4460ee34c3", - "receipts_root": "0x7b1f61b9714c080ef0be014e01657a15f45f0304b477beebc7ca5596c8033095", - "logs_bloom": "0x00000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000000000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000040004000000000000002000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000200000000000010", - "prev_randao": "0x6d9e2a012d82b1b6cb0a2c1c1ed24cc16dbb56e6e39ae545371e0666ab057862", - "block_number": 215, - "gas_limit": 64842908, - "gas_used": 119301, - "timestamp": 1705859527, - "extra_data": "0xd983010d0a846765746888676f312e32312e358664617277696e", - "base_fee_per_gas": 7, - "block_hash": "0x48498dbfbcfae53a7f4c289ee00747aceea925f6260c50ead5a33e1c55c40f98", - "transactions_root": "0x5ebc1347fe3df0611d4f66b19bd8e1c6f4eaed0371d850f14c83b1c77ea234e6", - "withdrawals_root": "0x792930bbd5baac43bcc798ee49aa8185ef76bb3b44ba62b91d86ae569e4bb535", - "blob_gas_used": 0, - "excess_blob_gas": 0 - } - }, - "execution_branch": [ - "0xf8c69d3830406d668619bcccc13c8dddde41e863326f7418b241d5924c4ad34a", - "0xb46f0c01805fe212e15907981b757e6c496b0cb06664224655613dcec82505bb", - "0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71", - "0xf4d6b5cf9c6e212615c3674fa625d04eb1114153fb221ef5ad02aa433fc67cfb" - ] -} \ No newline at end of file diff --git a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/execution-proof.json b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/execution-proof.json new file mode 100755 index 00000000000..f55898087df --- /dev/null +++ b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/execution-proof.json @@ -0,0 
+1,54 @@ +{ + "header": { + "slot": 393, + "proposer_index": 4, + "parent_root": "0x6545b47a614a1dd4cad042a0cdbbf5be347e8ffcdc02c6c64540d5153acebeef", + "state_root": "0xb62ac34a8cb82497be9542fe2114410c9f6021855b766015406101a1f3d86434", + "body_root": "0x04005fe231e11a5b7b1580cb73b177ae8b338bedd745497e6bb7122126a806db" + }, + "ancestry_proof": { + "header_branch": [ + "0x6545b47a614a1dd4cad042a0cdbbf5be347e8ffcdc02c6c64540d5153acebeef", + "0xfa84cc88ca53a72181599ff4eb07d8b444bce023fe2347c3b4f51004c43439d3", + "0xcadc8ae211c6f2221c9138e829249adf902419c78eb4727a150baa4d9a02cc9d", + "0x33a89962df08a35c52bd7e1d887cd71fa7803e68787d05c714036f6edf75947c", + "0x2c9760fce5c2829ef3f25595a703c21eb22d0186ce223295556ed5da663a82cf", + "0xe1aa87654db79c8a0ecd6c89726bb662fcb1684badaef5cd5256f479e3c622e1", + "0xaa70d5f314e4a1fbb9c362f3db79b21bf68b328887248651fbd29fc501d0ca97", + "0x160b6c235b3a1ed4ef5f80b03ee1c76f7bf3f591c92fca9d8663e9221b9f9f0f", + "0xf68d7dcd6a07a18e9de7b5d2aa1980eb962e11d7dcb584c96e81a7635c8d2535", + "0x1d5f912dfd6697110dd1ecb5cb8e77952eef57d85deb373572572df62bb157fc", + "0xffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b", + "0x6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220", + "0xb7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f" + ], + "finalized_block_root": "0x751414cd97c0624f922b3e80285e9f776b08fa22fd5f87391f2ed7ef571a8d46" + }, + "execution_header": { + "Deneb": { + "parent_hash": "0x8092290aa21b7751576440f77edd02a94058429ce50e63a92d620951fb25eda2", + "fee_recipient": "0x0000000000000000000000000000000000000000", + "state_root": "0x96a83e9ddf745346fafcb0b03d57314623df669ed543c110662b21302a0fae8b", + "receipts_root": "0xdccdfceea05036f7b61dcdabadc937945d31e68a8d3dfd4dc85684457988c284", + "logs_bloom": "0x00000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000400000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000080000000000000000000000000000040004000000000000002002002000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000080000000000000000000000000000000000100000000000000000200000200000010", + "prev_randao": "0x62e309d4f5119d1f5c783abc20fc1a549efbab546d8d0b25ff1cfd58be524e67", + "block_number": 393, + "gas_limit": 54492273, + "gas_used": 199644, + "timestamp": 1710552813, + "extra_data": "0xd983010d0b846765746888676f312e32312e368664617277696e", + "base_fee_per_gas": 7, + "block_hash": "0x6a9810efb9581d30c1a5c9074f27c68ea779a8c1ae31c213241df16225f4e131", + "transactions_root": "0x2cfa6ed7327e8807c7973516c5c32a68ef2459e586e8067e113d081c3bd8c07d", + "withdrawals_root": "0x792930bbd5baac43bcc798ee49aa8185ef76bb3b44ba62b91d86ae569e4bb535", + "blob_gas_used": 0, + "excess_blob_gas": 0 + } + }, + "execution_branch": [ + "0xa6833fa629f3286b6916c6e50b8bf089fc9126bee6f64d0413b4e59c1265834d", + "0xb46f0c01805fe212e15907981b757e6c496b0cb06664224655613dcec82505bb", + "0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71", + "0xd3af7c05c516726be7505239e0b9c7cb53d24abce6b91cdb3b3995f0164a75da" + ] +} \ No newline at end of file diff --git a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/finalized-header-update.json b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/finalized-header-update.json index f9d5324d57b..2dec5cc56fa 100755 --- 
a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/finalized-header-update.json +++ b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/finalized-header-update.json @@ -1,38 +1,40 @@ { "attested_header": { - "slot": 2566, - "proposer_index": 6, - "parent_root": "0x6eb9f13a2c496318ce1ab3087bbd872f5c9519a1a7ca8231a2453e3cb523af00", - "state_root": "0xc8cb12766113dff7e46d2917267bf33d0626d99dd47715fcdbc5c65fad3c04b4", - "body_root": "0xd8cfd0d7bc9bc3724417a1655bb0a67c0765ca36197320f4d834150b52ef1420" + "slot": 933, + "proposer_index": 1, + "parent_root": "0xf5fc63e2780ca302b97aea73fc95d74d702b5afe9a772c2b68f695026337b620", + "state_root": "0xd856d11636bc4d866e78be9e747b222b0977556a367ab42e4085277301438050", + "body_root": "0x5689091ab4eb76c2e876271add4924e1c66ce987c300c24aac2ad8c703e9a33f" }, "sync_aggregate": { "sync_committee_bits": "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "sync_committee_signature": "0x9296f9a0387f2cac47008e22ad7c3cd3d49d35384c13e6aa1eacca7dca7c3d2ca81515e50eb3396b9550ed20ef7d8fa2049a186598feb2c00e93728045fcff917733d1827481b8fc95f3913e27fc70112c2490496eb57bb7181f02c3f9fd471f" + "sync_committee_signature": "0x93a3d482fe2a2f7fd2b634169752a8fddf1dc28b23a020b398be8526faf37a74ca0f6db1bed78a9c7256c09a6115235e108e0e8a7ce09287317b0856c4b77dfa5adba6cf4c3ebea5bfa4cd2fcde80fd0a532f2defe65d530201d5d2258796559" }, - "signature_slot": 2567, + "signature_slot": 934, "next_sync_committee_update": null, "finalized_header": { - "slot": 2496, - "proposer_index": 2, - "parent_root": "0xc99e49787106733eeebab4d93eb326e1f2214575c9d928f0c4ab0da0776f1622", - "state_root": "0xfbf8a08c86ef36bd173e37e733da4a78aa8e85fee99a990e858dd12a59087fde", - "body_root": "0xa2a8ad06901447b2807a9059580a4c40d8a941f325b1343c69f7c7c6c90e4ab0" + "slot": 864, + "proposer_index": 4, + "parent_root": "0x614e7672f991ac268cd841055973f55e1e42228831a211adef207bb7329be614", + "state_root": "0x5fa8dfca3d760e4242ab46d529144627aa85348a19173b6e081172c701197a4a", + "body_root": "0x0f34c083b1803666bb1ac5e73fa71582731a2cf37d279ff0a3b0cad5a2ff371e" }, "finality_branch": [ - "0x4e00000000000000000000000000000000000000000000000000000000000000", + "0x1b00000000000000000000000000000000000000000000000000000000000000", "0x10c726fac935bf9657cc7476d3cfa7bedec5983dcfb59e8a7df6d0a619e108d7", "0x98e9116c6bb7f20de18800dc63e73e689d06d6a47d35b5e2b32cf093d475840d", - "0x958b8e43347f6df6fa5eb3d62d06a862381a6585aa40640dd1c0de11f1cf89c1", - "0xf107dce04faa86a28fc5d4a618be9cb8d4fc3c23d6c42c3624f3ff4bf6586a03", - "0xa501cdc02e86969ac3e4d0c5a36f4f049efaa1ab8cb6693f51d130eb52a80f30" + "0xf12d9aededc72724e417b518fe6f847684f26f81616243dedf8c551cc7d504f5", + "0x89a85d0907ab3fd6e00ae385f61d456c6191646404ae7b8d23d0e60440cf4d00", + "0x9fc943b6020eb61d780d78bcc6f6102a81d2c868d58f36e61c6e286a2dc4d8c2" ], - "block_roots_root": "0xd160b7687041891b73e54b06fc4e04f82d0fa8fdd76705895e216c6b24709dfe", + "block_roots_root": "0xb9aab9c388c4e4fcd899b71f62c498fc73406e38e8eb14aa440e9affa06f2a10", "block_roots_branch": [ - "0x105290e42d98ab6a0ada6e55453cede36c672abf645eeb986b88d7487616e135", - "0x9da41f274bcdf6122335443d9ce94d07163b48dba3e2f9499ff56f4e48b48b99", - "0xecea7e1d3152d8130e83afdfe34b4de4ba2b69a33c9471991096daf454de9cf5", - "0xb2bf1758e50b2bfff29169fbc70fdb884b2b05bb615dbc53567574da6f4f1ae2", - "0xcd87069daf70975779126d6af833b7d636c75ca4d5e750ebcad0e76408a5e5bf" - ] + "0x733422bd810895dab74cbbe07c69dd440cbb51f573181ad4dddac30fcdd0f41f", + 
"0x9b9eca73ab01d14549c325ba1b4610bb20bf1f8ec2dbd649f9d8cc7f3cea75fa", + "0xbcc666ad0ad9f9725cbd682bc95589d35b1b53b2a615f1e6e8dd5e086336becf", + "0x3069b547a08f703a1715016e926cbd64e71f93f64fb68d98d8c8f1ab745c46e5", + "0xc2de7e1097239404e17b263cfa0473533cc41e903cb03440d633bc5c27314cb4" + ], + "execution_header": null, + "execution_branch": null } \ No newline at end of file diff --git a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/inbound-message.json b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/inbound-message.json index 5aa5a59f023..6589dca5fb4 100644 --- a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/inbound-message.json +++ b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/inbound-message.json @@ -1,31 +1,79 @@ { - "execution_header": { - "parent_hash": "0xd82ec63f5c5e6ba61d62f09c188f158e6449b94bdcc31941e68639eec3c4cf7a", - "state_root": "0x8b65545fe5f3216b47b6339b9c91ca2b7f1032a970b04246d9e9fb4460ee34c3", - "receipts_root": "0x7b1f61b9714c080ef0be014e01657a15f45f0304b477beebc7ca5596c8033095", - "block_number": 215 + "event_log": { + "address": "0xeda338e4dc46038493b885327842fd3e301cab39", + "topics": [ + "0x7153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84f", + "0xc173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539", + "0x5f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0" + ], + "data": "0x00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000" }, - "message": { - "event_log": { - "address": "0xeda338e4dc46038493b885327842fd3e301cab39", - "topics": [ - "0x7153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84f", - "0xc173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539", - "0x5f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0" + "proof": { + "block_hash": "0x6a9810efb9581d30c1a5c9074f27c68ea779a8c1ae31c213241df16225f4e131", + "tx_index": 0, + "receipt_proof": { + "keys": [ + "0xdccdfceea05036f7b61dcdabadc937945d31e68a8d3dfd4dc85684457988c284", + "0x4a98e45a319168b0fc6005ce6b744ee9bf54338e2c0784b976a8578d241ced0f" ], - "data": "0x00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000" + "values": [ + "0xf851a09c01dd6d2d8de951c45af23d3ad00829ce021c04d6c8acbe1612d456ee320d4980808080808080a04a98e45a319168b0fc6005ce6b744ee9bf54338e2c0784b976a8578d241ced0f8080808080808080", + 
"0xf9028c30b9028802f90284018301d205b9010000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000000000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000040004000000000000002000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000200000000000010f90179f85894eda338e4dc46038493b885327842fd3e301cab39e1a0f78bb28d4b1d7da699e5c0bc2be29c2b04b5aab6aacf6298fe5304f9db9c6d7ea000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7df9011c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a05f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0b8a000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000" + ] }, - "Proof": { - "block_hash": "0x48498dbfbcfae53a7f4c289ee00747aceea925f6260c50ead5a33e1c55c40f98", - "tx_index": 0, - "data": { - "keys": [ - "0x7b1f61b9714c080ef0be014e01657a15f45f0304b477beebc7ca5596c8033095" + "execution_proof": { + "header": { + "slot": 393, + "proposer_index": 4, + "parent_root": "0x6545b47a614a1dd4cad042a0cdbbf5be347e8ffcdc02c6c64540d5153acebeef", + "state_root": "0xb62ac34a8cb82497be9542fe2114410c9f6021855b766015406101a1f3d86434", + "body_root": "0x04005fe231e11a5b7b1580cb73b177ae8b338bedd745497e6bb7122126a806db" + }, + "ancestry_proof": { + "header_branch": [ + "0x6545b47a614a1dd4cad042a0cdbbf5be347e8ffcdc02c6c64540d5153acebeef", + "0xfa84cc88ca53a72181599ff4eb07d8b444bce023fe2347c3b4f51004c43439d3", + "0xcadc8ae211c6f2221c9138e829249adf902419c78eb4727a150baa4d9a02cc9d", + "0x33a89962df08a35c52bd7e1d887cd71fa7803e68787d05c714036f6edf75947c", + "0x2c9760fce5c2829ef3f25595a703c21eb22d0186ce223295556ed5da663a82cf", + "0xe1aa87654db79c8a0ecd6c89726bb662fcb1684badaef5cd5256f479e3c622e1", + "0xaa70d5f314e4a1fbb9c362f3db79b21bf68b328887248651fbd29fc501d0ca97", + "0x160b6c235b3a1ed4ef5f80b03ee1c76f7bf3f591c92fca9d8663e9221b9f9f0f", + "0xf68d7dcd6a07a18e9de7b5d2aa1980eb962e11d7dcb584c96e81a7635c8d2535", + "0x1d5f912dfd6697110dd1ecb5cb8e77952eef57d85deb373572572df62bb157fc", + "0xffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b", + "0x6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220", + "0xb7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f" ], - "values": [ - 
"0xf9028e822080b9028802f90284018301d205b9010000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000000000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000040004000000000000002000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000200000000000010f90179f85894eda338e4dc46038493b885327842fd3e301cab39e1a0f78bb28d4b1d7da699e5c0bc2be29c2b04b5aab6aacf6298fe5304f9db9c6d7ea000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7df9011c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a05f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0b8a000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000" - ] - } + "finalized_block_root": "0x751414cd97c0624f922b3e80285e9f776b08fa22fd5f87391f2ed7ef571a8d46" + }, + "execution_header": { + "Deneb": { + "parent_hash": "0x8092290aa21b7751576440f77edd02a94058429ce50e63a92d620951fb25eda2", + "fee_recipient": "0x0000000000000000000000000000000000000000", + "state_root": "0x96a83e9ddf745346fafcb0b03d57314623df669ed543c110662b21302a0fae8b", + "receipts_root": "0xdccdfceea05036f7b61dcdabadc937945d31e68a8d3dfd4dc85684457988c284", + "logs_bloom": "0x00000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000400000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000080000000000000000000000000000040004000000000000002002002000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000080000000000000000000000000000000000100000000000000000200000200000010", + "prev_randao": "0x62e309d4f5119d1f5c783abc20fc1a549efbab546d8d0b25ff1cfd58be524e67", + "block_number": 393, + "gas_limit": 54492273, + "gas_used": 199644, + "timestamp": 1710552813, + "extra_data": "0xd983010d0b846765746888676f312e32312e368664617277696e", + "base_fee_per_gas": 7, + "block_hash": "0x6a9810efb9581d30c1a5c9074f27c68ea779a8c1ae31c213241df16225f4e131", + "transactions_root": "0x2cfa6ed7327e8807c7973516c5c32a68ef2459e586e8067e113d081c3bd8c07d", + "withdrawals_root": "0x792930bbd5baac43bcc798ee49aa8185ef76bb3b44ba62b91d86ae569e4bb535", + "blob_gas_used": 0, + "excess_blob_gas": 0 + } + }, + "execution_branch": [ + "0xa6833fa629f3286b6916c6e50b8bf089fc9126bee6f64d0413b4e59c1265834d", + "0xb46f0c01805fe212e15907981b757e6c496b0cb06664224655613dcec82505bb", + "0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71", + "0xd3af7c05c516726be7505239e0b9c7cb53d24abce6b91cdb3b3995f0164a75da" + ] } } } \ No newline at end of file diff --git a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/initial-checkpoint.json b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/initial-checkpoint.json index 202790c1db5..a62d646617e 100755 --- 
a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/initial-checkpoint.json +++ b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/initial-checkpoint.json @@ -1,10 +1,10 @@ { "header": { - "slot": 2496, - "proposer_index": 2, - "parent_root": "0xc99e49787106733eeebab4d93eb326e1f2214575c9d928f0c4ab0da0776f1622", - "state_root": "0xfbf8a08c86ef36bd173e37e733da4a78aa8e85fee99a990e858dd12a59087fde", - "body_root": "0xa2a8ad06901447b2807a9059580a4c40d8a941f325b1343c69f7c7c6c90e4ab0" + "slot": 864, + "proposer_index": 4, + "parent_root": "0x614e7672f991ac268cd841055973f55e1e42228831a211adef207bb7329be614", + "state_root": "0x5fa8dfca3d760e4242ab46d529144627aa85348a19173b6e081172c701197a4a", + "body_root": "0x0f34c083b1803666bb1ac5e73fa71582731a2cf37d279ff0a3b0cad5a2ff371e" }, "current_sync_committee": { "pubkeys": [ @@ -525,18 +525,18 @@ }, "current_sync_committee_branch": [ "0x3ade38d498a062b50880a9409e1ca3a7fd4315d91eeb3bb83e56ac6bfe8d6a59", - "0x93880225bf99a0c5ec22b266ff829837754e9c5edf37a68c05b8f803fd82fa45", - "0x4c60656ec9a95fcf11030ad309c716b5b15beb7f60a0bcfc7c9d4eff505472ff", - "0x22d1645fceb4bf9a695043dda19a53e784ec70df6a6b1bd66ea30eba1cca5f2f", - "0xa8fc6cad84ceefc633ec56c2d031d525e1cb4b51c70eb252919fce5bba9a1fde" + "0xa9e90f89e7f90fd5d79a6bbcaf40ba5cfc05ab1b561ac51c84867c32248d5b1e", + "0xbd1a76b03e02402bb24a627de1980a80ab17691980271f597b844b89b497ef75", + "0x07bbcd27c7cad089023db046eda17e8209842b7d97add8b873519e84fe6480e7", + "0x94c11eeee4cb6192bf40810f23486d8c75dfbc2b6f28d988d6f74435ede243b0" ], "validators_root": "0x270d43e74ce340de4bca2b1936beca0f4f5408d9e78aec4850920baf659d5b69", - "block_roots_root": "0xd160b7687041891b73e54b06fc4e04f82d0fa8fdd76705895e216c6b24709dfe", + "block_roots_root": "0xb9aab9c388c4e4fcd899b71f62c498fc73406e38e8eb14aa440e9affa06f2a10", "block_roots_branch": [ - "0x105290e42d98ab6a0ada6e55453cede36c672abf645eeb986b88d7487616e135", - "0x9da41f274bcdf6122335443d9ce94d07163b48dba3e2f9499ff56f4e48b48b99", - "0xecea7e1d3152d8130e83afdfe34b4de4ba2b69a33c9471991096daf454de9cf5", - "0xb2bf1758e50b2bfff29169fbc70fdb884b2b05bb615dbc53567574da6f4f1ae2", - "0xcd87069daf70975779126d6af833b7d636c75ca4d5e750ebcad0e76408a5e5bf" + "0x733422bd810895dab74cbbe07c69dd440cbb51f573181ad4dddac30fcdd0f41f", + "0x9b9eca73ab01d14549c325ba1b4610bb20bf1f8ec2dbd649f9d8cc7f3cea75fa", + "0xbcc666ad0ad9f9725cbd682bc95589d35b1b53b2a615f1e6e8dd5e086336becf", + "0x3069b547a08f703a1715016e926cbd64e71f93f64fb68d98d8c8f1ab745c46e5", + "0xc2de7e1097239404e17b263cfa0473533cc41e903cb03440d633bc5c27314cb4" ] } \ No newline at end of file diff --git a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/sync-committee-update.json b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/sync-committee-update.json index 6bf20355c7a..4d601d7d8f0 100755 --- a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/sync-committee-update.json +++ b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/sync-committee-update.json @@ -2,13 +2,13 @@ "attested_header": { "slot": 129, "proposer_index": 5, - "parent_root": "0xe32b6c18f029e755b0273dc1c4fa2bc4979794c8286ad40276c1b8a8e36049d8", - "state_root": "0x5ec9dacf25a5f09f20be0c59246b3d8dcfe64bd085b4bac5cec180690339801e", - "body_root": "0x4080cf2412d6ff77fc3164ad6155423a7112f207f173145ec16371a93f481f87" + "parent_root": "0xc2def03fe44a2802130ca1a6d8406e4ccf4f344fec7075d4d84431cd4a8b0904", + "state_root": "0xfa62cde6666add7353d7aedcb61ebe3c6c84b5361e34f814825b1250affb5be4", + "body_root": 
"0x0f9c69f243fe7b5fa5860396c66c720a9e8b1e526e7914188930497cc4a9134c" }, "sync_aggregate": { "sync_committee_bits": "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "sync_committee_signature": "0xa761c3333fbb3d36bc8f65454f898da38001499dcd37494cf3d86940a995399ae649216ba4c985af154f83f72c8b1856079b7636a7a8d7d3f7602df2cbf699edb72b65253e82de4d9cc4db7377eafb22f799129f63f094a21c00675bdd5cc243" + "sync_committee_signature": "0x810cfde2afea3e276256c09bdf1cd321c33dcadeefddcfd24f488e6f756d917cfda90b5b437b3a4b4ef880985afa28a40cf565ec0a82877ddee36adc01d55d9d4a911ae3e22556e4c2636f1c707366fba019fb49450440fcd263d0b054b04bf0" }, "signature_slot": 130, "next_sync_committee_update": { @@ -531,33 +531,35 @@ }, "next_sync_committee_branch": [ "0x3ade38d498a062b50880a9409e1ca3a7fd4315d91eeb3bb83e56ac6bfe8d6a59", - "0xfd1e5ff5d4a15081efe3ff17857b1f95984c9a271b1c41c2f81f43e60c2cc541", - "0xe1c97f93bb7352d395d1ff8ee29881572cb7eb5d71634783701171dcd30cd93d", - "0x77fa2170ddbd89b15dae02f2e6cf9f76c8e00d1c4217320acffbe01576d0da61", - "0xe97288e0627219087a024078d69445f34f0583a6350a7c3c40c39fd1fa6f8d68" + "0x43276bee17fc9fba3f4866e902f0e5b5b308d79db91154bb8bf819973837a7d9", + "0x5572348e13ce59446ca0ea7cfeed07579da05f121920a76559e19bda94dd81cd", + "0x2d58adca9f3c742530de037f1933d6de1920ea4b68581613d4bc32b71547f221", + "0x7072b3c6577cd5a89b3234968f316f54630bb97eafbdb59e5b61637a9640255f" ] }, "finalized_header": { "slot": 64, "proposer_index": 4, - "parent_root": "0x0f7bc2353778c14c7f6dba0fc5fe6eec87228b0d3a5447b61dce67b4d9338de3", - "state_root": "0xfeb990de653ce494c0a263f820eaf05a9300dbdc30cb6065ede602827bfccde4", - "body_root": "0xf5235cd8c24f2695fc5b7989926305c10ad8cf5a87d62a739f675f5543df2ec1" + "parent_root": "0xa876486aaad7ddb897f369fd22d0a9903cd61d00c9e0dfe7998dd68d1008c678", + "state_root": "0x818e21c3388575f8ccc9ff17ec79d5a57915bcd31bccf47770f65a18e068416b", + "body_root": "0x1d1f73b864b3bb7e11ff91b56ca1381e0f9ca8122b2c542db88243604c763019" }, "finality_branch": [ "0x0200000000000000000000000000000000000000000000000000000000000000", "0x10c726fac935bf9657cc7476d3cfa7bedec5983dcfb59e8a7df6d0a619e108d7", "0x98e9116c6bb7f20de18800dc63e73e689d06d6a47d35b5e2b32cf093d475840d", - "0xe1c97f93bb7352d395d1ff8ee29881572cb7eb5d71634783701171dcd30cd93d", - "0x77fa2170ddbd89b15dae02f2e6cf9f76c8e00d1c4217320acffbe01576d0da61", - "0xe97288e0627219087a024078d69445f34f0583a6350a7c3c40c39fd1fa6f8d68" + "0x5572348e13ce59446ca0ea7cfeed07579da05f121920a76559e19bda94dd81cd", + "0x2d58adca9f3c742530de037f1933d6de1920ea4b68581613d4bc32b71547f221", + "0x7072b3c6577cd5a89b3234968f316f54630bb97eafbdb59e5b61637a9640255f" ], - "block_roots_root": "0x6fcdfd1c3fb1bdd421fe59dddfff3855b5ed5e30373887991a0059d019ad12bc", + "block_roots_root": "0x715b08694bef183a6d94b3113d16a7129f89fc3edec85a7e0eaf6ef9153552ef", "block_roots_branch": [ - "0x94b59531f172bc24f914bc0c10104ccb158676850f8cc3b47b6ddb7f096ebdd7", - "0x22470ed9155a938587d44d5fa19217c0f939d8862e504e67cd8cb4d1b960795e", - "0xfeec3ef1a68f93849e71e84f90b99602cccc31868137b6887ca8244a4b979e8e", + "0x4028c72c71b6ce80ea7d18b2c9471f4e4fa39746261a9921e832a4a2f9bdf7bb", + "0x75f98062661785d3290b7bd998b64446582baa49210733fd4603e1a97cd45a44", + "0x6fb757f44052f30c464810f01b0132adfa1a5446d8715b41e9af88eee1ee3e65", "0x5340ad5877c72dca689ca04bc8fedb78d67a4801d99887937edd8ccd29f87e82", - "0xf5ff4b0c6190005015889879568f5f0d9c40134c7ec4ffdda47950dcd92395ad" - ] + 
"0xf2b3cb56753939a728ccad399a434ca490f018f2f331529ec0d8b2d59c509271" + ], + "execution_header": null, + "execution_branch": null } \ No newline at end of file diff --git a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/lib.rs b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/lib.rs index 4f3445b2905..00adcdfa186 100644 --- a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/lib.rs +++ b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/lib.rs @@ -2,17 +2,6 @@ // SPDX-FileCopyrightText: 2023 Snowfork #![cfg_attr(not(feature = "std"), no_std)] -use snowbridge_beacon_primitives::CompactExecutionHeader; -use snowbridge_core::inbound::Message; -use sp_core::RuntimeDebug; - pub mod register_token; -pub mod register_token_with_insufficient_fee; pub mod send_token; pub mod send_token_to_penpal; - -#[derive(Clone, RuntimeDebug)] -pub struct InboundQueueFixture { - pub execution_header: CompactExecutionHeader, - pub message: Message, -} diff --git a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/register_token.rs b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/register_token.rs index b8d510e6b13..340b2fadfac 100644 --- a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/register_token.rs +++ b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/register_token.rs @@ -3,20 +3,16 @@ // Generated, do not edit! // See ethereum client README.md for instructions to generate -use crate::InboundQueueFixture; use hex_literal::hex; -use snowbridge_beacon_primitives::CompactExecutionHeader; -use snowbridge_core::inbound::{Log, Message, Proof}; +use snowbridge_beacon_primitives::{ + types::deneb, AncestryProof, BeaconHeader, ExecutionProof, VersionedExecutionPayloadHeader, +}; +use snowbridge_core::inbound::{InboundQueueFixture, Log, Message, Proof}; +use sp_core::U256; use sp_std::vec; pub fn make_register_token_message() -> InboundQueueFixture { InboundQueueFixture { - execution_header: CompactExecutionHeader{ - parent_hash: hex!("d5de3dd02c96dbdc8aaa4db70a1e9fdab5ded5f4d52f18798acd56a3d37d1ad6").into(), - block_number: 772, - state_root: hex!("49cba2a79b23ad74cefe80c3a96699825d1cda0f75bfceb587c5549211c86245").into(), - receipts_root: hex!("7b1f61b9714c080ef0be014e01657a15f45f0304b477beebc7ca5596c8033095").into(), - }, message: Message { event_log: Log { address: hex!("eda338e4dc46038493b885327842fd3e301cab39").into(), @@ -28,14 +24,74 @@ pub fn make_register_token_message() -> InboundQueueFixture { data: hex!("00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000").into(), }, proof: Proof { - block_hash: hex!("392182a385b3a417e8ddea8b252953ee81e6ec0fb09d9056c96c89fbeb703a3f").into(), - tx_index: 0, - data: (vec![ - hex!("7b1f61b9714c080ef0be014e01657a15f45f0304b477beebc7ca5596c8033095").to_vec(), + receipt_proof: (vec![ + hex!("dccdfceea05036f7b61dcdabadc937945d31e68a8d3dfd4dc85684457988c284").to_vec(), + hex!("4a98e45a319168b0fc6005ce6b744ee9bf54338e2c0784b976a8578d241ced0f").to_vec(), ], vec![ - 
hex!("f9028e822080b9028802f90284018301d205b9010000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000000000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000040004000000000000002000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000200000000000010f90179f85894eda338e4dc46038493b885327842fd3e301cab39e1a0f78bb28d4b1d7da699e5c0bc2be29c2b04b5aab6aacf6298fe5304f9db9c6d7ea000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7df9011c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a05f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0b8a000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000").to_vec(), + hex!("f851a09c01dd6d2d8de951c45af23d3ad00829ce021c04d6c8acbe1612d456ee320d4980808080808080a04a98e45a319168b0fc6005ce6b744ee9bf54338e2c0784b976a8578d241ced0f8080808080808080").to_vec(), + hex!("f9028c30b9028802f90284018301d205b9010000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000000000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000040004000000000000002000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000200000000000010f90179f85894eda338e4dc46038493b885327842fd3e301cab39e1a0f78bb28d4b1d7da699e5c0bc2be29c2b04b5aab6aacf6298fe5304f9db9c6d7ea000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7df9011c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a05f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0b8a000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000").to_vec(), ]), + execution_proof: ExecutionProof { + header: BeaconHeader { + slot: 393, + proposer_index: 4, + parent_root: hex!("6545b47a614a1dd4cad042a0cdbbf5be347e8ffcdc02c6c64540d5153acebeef").into(), + state_root: hex!("b62ac34a8cb82497be9542fe2114410c9f6021855b766015406101a1f3d86434").into(), + body_root: hex!("04005fe231e11a5b7b1580cb73b177ae8b338bedd745497e6bb7122126a806db").into(), + }, + ancestry_proof: Some(AncestryProof { + header_branch: vec![ + hex!("6545b47a614a1dd4cad042a0cdbbf5be347e8ffcdc02c6c64540d5153acebeef").into(), + hex!("fa84cc88ca53a72181599ff4eb07d8b444bce023fe2347c3b4f51004c43439d3").into(), + hex!("cadc8ae211c6f2221c9138e829249adf902419c78eb4727a150baa4d9a02cc9d").into(), + 
hex!("33a89962df08a35c52bd7e1d887cd71fa7803e68787d05c714036f6edf75947c").into(), + hex!("2c9760fce5c2829ef3f25595a703c21eb22d0186ce223295556ed5da663a82cf").into(), + hex!("e1aa87654db79c8a0ecd6c89726bb662fcb1684badaef5cd5256f479e3c622e1").into(), + hex!("aa70d5f314e4a1fbb9c362f3db79b21bf68b328887248651fbd29fc501d0ca97").into(), + hex!("160b6c235b3a1ed4ef5f80b03ee1c76f7bf3f591c92fca9d8663e9221b9f9f0f").into(), + hex!("f68d7dcd6a07a18e9de7b5d2aa1980eb962e11d7dcb584c96e81a7635c8d2535").into(), + hex!("1d5f912dfd6697110dd1ecb5cb8e77952eef57d85deb373572572df62bb157fc").into(), + hex!("ffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b").into(), + hex!("6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220").into(), + hex!("b7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f").into(), + ], + finalized_block_root: hex!("751414cd97c0624f922b3e80285e9f776b08fa22fd5f87391f2ed7ef571a8d46").into(), + }), + execution_header: VersionedExecutionPayloadHeader::Deneb(deneb::ExecutionPayloadHeader { + parent_hash: hex!("8092290aa21b7751576440f77edd02a94058429ce50e63a92d620951fb25eda2").into(), + fee_recipient: hex!("0000000000000000000000000000000000000000").into(), + state_root: hex!("96a83e9ddf745346fafcb0b03d57314623df669ed543c110662b21302a0fae8b").into(), + receipts_root: hex!("dccdfceea05036f7b61dcdabadc937945d31e68a8d3dfd4dc85684457988c284").into(), + logs_bloom: hex!("00000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000400000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000080000000000000000000000000000040004000000000000002002002000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000080000000000000000000000000000000000100000000000000000200000200000010").into(), + prev_randao: hex!("62e309d4f5119d1f5c783abc20fc1a549efbab546d8d0b25ff1cfd58be524e67").into(), + block_number: 393, + gas_limit: 54492273, + gas_used: 199644, + timestamp: 1710552813, + extra_data: hex!("d983010d0b846765746888676f312e32312e368664617277696e").into(), + base_fee_per_gas: U256::from(7u64), + block_hash: hex!("6a9810efb9581d30c1a5c9074f27c68ea779a8c1ae31c213241df16225f4e131").into(), + transactions_root: hex!("2cfa6ed7327e8807c7973516c5c32a68ef2459e586e8067e113d081c3bd8c07d").into(), + withdrawals_root: hex!("792930bbd5baac43bcc798ee49aa8185ef76bb3b44ba62b91d86ae569e4bb535").into(), + blob_gas_used: 0, + excess_blob_gas: 0, + }), + execution_branch: vec![ + hex!("a6833fa629f3286b6916c6e50b8bf089fc9126bee6f64d0413b4e59c1265834d").into(), + hex!("b46f0c01805fe212e15907981b757e6c496b0cb06664224655613dcec82505bb").into(), + hex!("db56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71").into(), + hex!("d3af7c05c516726be7505239e0b9c7cb53d24abce6b91cdb3b3995f0164a75da").into(), + ], + } }, }, + finalized_header: BeaconHeader { + slot: 864, + proposer_index: 4, + parent_root: hex!("614e7672f991ac268cd841055973f55e1e42228831a211adef207bb7329be614").into(), + state_root: hex!("5fa8dfca3d760e4242ab46d529144627aa85348a19173b6e081172c701197a4a").into(), + body_root: hex!("0f34c083b1803666bb1ac5e73fa71582731a2cf37d279ff0a3b0cad5a2ff371e").into(), + }, + block_roots_root: hex!("b9aab9c388c4e4fcd899b71f62c498fc73406e38e8eb14aa440e9affa06f2a10").into(), } } diff --git a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/register_token_with_insufficient_fee.rs 
b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/register_token_with_insufficient_fee.rs deleted file mode 100644 index dfda0b2b427..00000000000 --- a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/register_token_with_insufficient_fee.rs +++ /dev/null @@ -1,42 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// SPDX-FileCopyrightText: 2023 Snowfork -// Generated, do not edit! -// See ethereum client README.md for instructions to generate - -use crate::InboundQueueFixture; -use hex_literal::hex; -use snowbridge_beacon_primitives::CompactExecutionHeader; -use snowbridge_core::inbound::{Log, Message, Proof}; -use sp_std::vec; - -pub fn make_register_token_with_insufficient_fee_message() -> InboundQueueFixture { - InboundQueueFixture { - execution_header: CompactExecutionHeader{ - parent_hash: hex!("998e81dc6df788a920b67e058fbde0dc3f4ec6f11f3f7cd8c3148e6d99584885").into(), - block_number: 338, - state_root: hex!("30ef9c9db2609de19bbc6c3cbeddac889e82bbcb2db20304b3abdfbdc7134cbf").into(), - receipts_root: hex!("969335c3132a007cb8b5886a3c23dd8da63cba04aeda29857a86ee1c13dae782").into(), - }, - message: Message { - event_log: Log { - address: hex!("eda338e4dc46038493b885327842fd3e301cab39").into(), - topics: vec![ - hex!("7153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84f").into(), - hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539").into(), - hex!("5f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0").into(), - ], - // insufficient xcm fee as only 1000(hex:e803) - data: hex!("00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7de8030000000000000000000000000000000000000000000000000000000000000000").into(), - }, - proof: Proof { - block_hash: hex!("5976f37f0e331d194eb331df74355ef47565c3a1bd11c95a45b681f6917085c1").into(), - tx_index: 0, - data: (vec![ - hex!("969335c3132a007cb8b5886a3c23dd8da63cba04aeda29857a86ee1c13dae782").to_vec(), - ], vec![ - hex!("f9028e822080b9028802f90284018301d205b9010000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000000000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000040004000000000000002000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000200000000000010f90179f85894eda338e4dc46038493b885327842fd3e301cab39e1a0f78bb28d4b1d7da699e5c0bc2be29c2b04b5aab6aacf6298fe5304f9db9c6d7ea000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7df9011c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a05f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0b8a000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7de8030000000000000000000000000000000000000000000000000000000000000000").to_vec(), - ]), - }, - }, - } -} diff --git 
a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/send_token.rs b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/send_token.rs index 2562217100e..4075febab59 100755 --- a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/send_token.rs +++ b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/send_token.rs @@ -3,20 +3,16 @@ // Generated, do not edit! // See ethereum client README.md for instructions to generate -use crate::InboundQueueFixture; use hex_literal::hex; -use snowbridge_beacon_primitives::CompactExecutionHeader; -use snowbridge_core::inbound::{Log, Message, Proof}; +use snowbridge_beacon_primitives::{ + types::deneb, AncestryProof, BeaconHeader, ExecutionProof, VersionedExecutionPayloadHeader, +}; +use snowbridge_core::inbound::{InboundQueueFixture, Log, Message, Proof}; +use sp_core::U256; use sp_std::vec; pub fn make_send_token_message() -> InboundQueueFixture { InboundQueueFixture { - execution_header: CompactExecutionHeader{ - parent_hash: hex!("920cecde45d428e3a77590b70f8533cf4c2c36917b8a7b74c915e7fa3dae7075").into(), - block_number: 1148, - state_root: hex!("bbc6ba0e9940d641afecbbaf3f97abd2b9ffaf2f6bd4879c4a71e659eca89978").into(), - receipts_root: hex!("9f3340b57eddc1f86de30776db57faeca80269a3dd459031741988dec240ce34").into(), - }, message: Message { event_log: Log { address: hex!("eda338e4dc46038493b885327842fd3e301cab39").into(), @@ -28,14 +24,72 @@ pub fn make_send_token_message() -> InboundQueueFixture { data: hex!("00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000005f00a736aa00000000000187d1f7fdfee7f651fabc8bfcb6e086c278b77a7d008eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48000064a7b3b6e00d000000000000000000e40b5402000000000000000000000000").into(), }, proof: Proof { - block_hash: hex!("d3c155f123c3cbff22f3d7869283e02179edea9ffa7a5e9a4d8414c2a6b8991f").into(), - tx_index: 0, - data: (vec![ - hex!("9f3340b57eddc1f86de30776db57faeca80269a3dd459031741988dec240ce34").to_vec(), + receipt_proof: (vec![ + hex!("f9d844c5b79638609ba385b910fec3b5d891c9d7b189f135f0432f33473de915").to_vec(), ], vec![ - 
hex!("f90451822080b9044b02f90447018301bcb9b9010000800000000000000000000020000000000000000000004000000000000000000400000000000000000000001000000010000000000000000000000008000000200000000000000001000008000000000000000000000000000000008000080000000000200000000000000000000000000100000000000000000011000000000000020200000000000000000000000000003000000040080008000000000000000000040044000021000000002000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000200800000000000f9033cf89b9487d1f7fdfee7f651fabc8bfcb6e086c278b77a7df863a0ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000057a2d4ff0c3866d96556884bf09fecdd7ccd530ca00000000000000000000000000000000000000000000000000de0b6b3a7640000f9015d94eda338e4dc46038493b885327842fd3e301cab39f884a024c5d2de620c6e25186ae16f6919eba93b6e2c1a33857cc419d9f3a00d6967e9a000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7da000000000000000000000000000000000000000000000000000000000000003e8b8c000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000208eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48f9013c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a0c8eaf22f2cb07bac4679df0a660e7115ed87fcfd4e32ac269f6540265bbbd26fb8c000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000005f00a736aa00000000000187d1f7fdfee7f651fabc8bfcb6e086c278b77a7d008eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48000064a7b3b6e00d000000000000000000e40b5402000000000000000000000000").to_vec(), + 
hex!("f90451822080b9044b02f90447018301bcb6b9010000800000000000000000000020000000000000000000004000000000000000000400000000000000000000001000000010000000000000000000000008000000200000000000000001000008000000000000000000000000000000008000080000000000200000000000000000000000000100000000000000000011000000000000020200000000000000000000000000003000000040080008000000000000000000040044000021000000002000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000200800000000000f9033cf89b9487d1f7fdfee7f651fabc8bfcb6e086c278b77a7df863a0ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000057a2d4ff0c3866d96556884bf09fecdd7ccd530ca00000000000000000000000000000000000000000000000000de0b6b3a7640000f9015d94eda338e4dc46038493b885327842fd3e301cab39f884a024c5d2de620c6e25186ae16f6919eba93b6e2c1a33857cc419d9f3a00d6967e9a000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7da000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000000000000000000000000000000000000000003e8b8c000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000208eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48f9013c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a0c8eaf22f2cb07bac4679df0a660e7115ed87fcfd4e32ac269f6540265bbbd26fb8c000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000005f00a736aa00000000000187d1f7fdfee7f651fabc8bfcb6e086c278b77a7d008eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48000064a7b3b6e00d000000000000000000e40b5402000000000000000000000000").to_vec(), ]), + execution_proof: ExecutionProof { + header: BeaconHeader { + slot: 2321, + proposer_index: 5, + parent_root: hex!("2add14727840d3a5ea061e14baa47030bb81380a65999200d119e73b86411d20").into(), + state_root: hex!("d962981467920bb2b7efa4a7a1baf64745582c3250857f49a957c5dae9a0da39").into(), + body_root: hex!("18e3f7f51a350f371ad35d166f2683b42af51d1836b295e4093be08acb0dcb7a").into(), + }, + ancestry_proof: Some(AncestryProof { + header_branch: vec![ + hex!("2add14727840d3a5ea061e14baa47030bb81380a65999200d119e73b86411d20").into(), + hex!("48b2e2f5256906a564e5058698f70e3406765fefd6a2edc064bb5fb88aa2ed0a").into(), + hex!("e5ed7c704e845418219b2fda42cd2f3438ffbe4c4b320935ae49439c6189f7a7").into(), + hex!("4a7ce24526b3f571548ad69679e4e260653a1b3b911a344e7f988f25a5c917a7").into(), + hex!("46fc859727ab0d0e8c344011f7d7a4426ccb537bb51363397e56cc7153f56391").into(), + hex!("f496b6f85a7c6c28a9048f2153550a7c5bcb4b23844ed3b87f6baa646124d8a3").into(), + hex!("7318644e474beb46e595a1875acc7444b937f5208065241911d2a71ac50c2de3").into(), + hex!("5cf48519e518ac64286aef5391319782dd38831d5dcc960578a6b9746d5f8cee").into(), + hex!("efb3e50fa39ca9fe7f76adbfa36fa8451ec2fd5d07b22aaf822137c04cf95a76").into(), + hex!("2206cd50750355ffaef4a67634c21168f2b564c58ffd04f33b0dc7af7dab3291").into(), + 
hex!("1a4014f6c4fcce9949fba74cb0f9e88df086706f9e05560cc9f0926f8c90e373").into(), + hex!("2df7cc0bcf3060be4132c63da7599c2600d9bbadf37ab001f15629bc2255698e").into(), + hex!("b7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f").into(), + ], + finalized_block_root: hex!("f869dd1c9598043008a3ac2a5d91b3d6c7b0bb3295b3843bc84c083d70b0e604").into(), + }), + execution_header: VersionedExecutionPayloadHeader::Deneb(deneb::ExecutionPayloadHeader { + parent_hash: hex!("5d7859883dde1eba6c98b20eac18426134b25da2a89e5e360f3343b15e0e0a31").into(), + fee_recipient: hex!("0000000000000000000000000000000000000000").into(), + state_root: hex!("f8fbebed4c84d46231bd293bb9fbc9340d5c28c284d99fdaddb77238b8960ae2").into(), + receipts_root: hex!("f9d844c5b79638609ba385b910fec3b5d891c9d7b189f135f0432f33473de915").into(), + logs_bloom: hex!("00800000000000000000000020000000000000000000004000000000000000000400000000000000000000001000000010000000000000000000000008000000200000000000000001000008000000000000000000000000000000008000080000000000200000000000000000000000000100000000000000000011000000000000020200000000000000000000000000003000000040080008000000000000000000040044000021000000002000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000200800000000000").into(), + prev_randao: hex!("15533eeb366c6386bea5aeb8f425871928348c092209e4377f2418a6dedd7fd0").into(), + block_number: 2321, + gas_limit: 30000000, + gas_used: 113846, + timestamp: 1710554741, + extra_data: hex!("d983010d0b846765746888676f312e32312e368664617277696e").into(), + base_fee_per_gas: U256::from(7u64), + block_hash: hex!("585a07122a30339b03b6481eae67c2d3de2b6b64f9f426230986519bf0f1bdfe").into(), + transactions_root: hex!("09cd60ee2207d804397c81f7b7e1e5d3307712b136e5376623a80317a4bdcd7a").into(), + withdrawals_root: hex!("792930bbd5baac43bcc798ee49aa8185ef76bb3b44ba62b91d86ae569e4bb535").into(), + blob_gas_used: 0, + excess_blob_gas: 0, + }), + execution_branch: vec![ + hex!("9d419471a9a4719b40e7607781fbe32d9a7766b79805505c78c0c58133496ba2").into(), + hex!("b46f0c01805fe212e15907981b757e6c496b0cb06664224655613dcec82505bb").into(), + hex!("db56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71").into(), + hex!("bee375b8f1bbe4cd0e783c78026c1829ae72741c2dead5cab05d6834c5e5df65").into(), + ], + } }, }, + finalized_header: BeaconHeader { + slot: 4032, + proposer_index: 5, + parent_root: hex!("180aaaec59d38c3860e8af203f01f41c9bc41665f4d17916567c80f6cd23e8a2").into(), + state_root: hex!("3341790429ed3bf894cafa3004351d0b99e08baf6c38eb2a54d58e69fd2d19c6").into(), + body_root: hex!("a221e0c695ac7b7d04ce39b28b954d8a682ecd57961d81b44783527c6295f455").into(), + }, + block_roots_root: hex!("5744385ef06f82e67606f49aa29cd162f2e837a68fb7bd82f1fc6155d9f8640f").into(), } } diff --git a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/send_token_to_penpal.rs b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/send_token_to_penpal.rs index 86ba3f7ecc1..6a951b568ae 100755 --- a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/send_token_to_penpal.rs +++ b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/send_token_to_penpal.rs @@ -3,39 +3,93 @@ // Generated, do not edit! 
// See ethereum client README.md for instructions to generate -use crate::InboundQueueFixture; use hex_literal::hex; -use snowbridge_beacon_primitives::CompactExecutionHeader; -use snowbridge_core::inbound::{Log, Message, Proof}; +use snowbridge_beacon_primitives::{ + types::deneb, AncestryProof, BeaconHeader, ExecutionProof, VersionedExecutionPayloadHeader, +}; +use snowbridge_core::inbound::{InboundQueueFixture, Log, Message, Proof}; +use sp_core::U256; use sp_std::vec; pub fn make_send_token_to_penpal_message() -> InboundQueueFixture { InboundQueueFixture { - execution_header: CompactExecutionHeader{ - parent_hash: hex!("434148c290f27ee4be34fa344cd7608bf942a4541b27c9d868439631b3f37a8d").into(), - block_number: 816, - state_root: hex!("595e643f9095870e30e85e2bbef7d9e3a39df5aae839d26cf455d3dbf3e5a539").into(), - receipts_root: hex!("c40ab2c4abcfdea4f42195e0ad822806e5423108021c3b542646c7193319a6c1").into(), - }, message: Message { event_log: Log { address: hex!("eda338e4dc46038493b885327842fd3e301cab39").into(), topics: vec![ hex!("7153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84f").into(), hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539").into(), - hex!("c8eaf22f2cb07bac4679df0a660e7115ed87fcfd4e32ac269f6540265bbbd26f").into(), + hex!("be323bced46a1a49c8da2ab62ad5e974fd50f1dabaeed70b23ca5bcf14bfe4aa").into(), ], - data: hex!("00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000007300a736aa00000000000187d1f7fdfee7f651fabc8bfcb6e086c278b77a7d01d00700001cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c00286bee000000000000000000000000000064a7b3b6e00d000000000000000000e40b5402000000000000000000000000000000000000000000000000").into(), + data: hex!("00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000007300a736aa00000000000187d1f7fdfee7f651fabc8bfcb6e086c278b77a7d01d00700001cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c00286bee000000000000000000000000000064a7b3b6e00d000000000000000000e40b5402000000000000000000000000000000000000000000000000").into(), }, proof: Proof { - block_hash: hex!("6c49a7f8fb2014a23e58a949c95a6743174589a7ce83434b073dc05dec402f3d").into(), - tx_index: 0, - data: (vec![ - hex!("c40ab2c4abcfdea4f42195e0ad822806e5423108021c3b542646c7193319a6c1").to_vec(), + receipt_proof: (vec![ + hex!("106f1eaeac04e469da0020ad5c8a72af66323638bd3f561a3c8236063202c120").to_vec(), ], vec![ - 
hex!("f90471822080b9046b02f90467018301d30fb9010000800000000000000000000000000000000000000000004000000000000000000400000000004000000000001000000010000000000000000000000008000000200000000000000001000008000000000000000000000000000000008000080000000000200000000000000000000000000100000000000000000011000000000000020000000000000000000000000000003000000000080018000000000000000000040044000021000000002000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000200820000000000f9035cf89b9487d1f7fdfee7f651fabc8bfcb6e086c278b77a7df863a0ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000057a2d4ff0c3866d96556884bf09fecdd7ccd530ca00000000000000000000000000000000000000000000000000de0b6b3a7640000f9015d94eda338e4dc46038493b885327842fd3e301cab39f884a024c5d2de620c6e25186ae16f6919eba93b6e2c1a33857cc419d9f3a00d6967e9a000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7da000000000000000000000000000000000000000000000000000000000000007d0b8c000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000201cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07cf9015c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a0c8eaf22f2cb07bac4679df0a660e7115ed87fcfd4e32ac269f6540265bbbd26fb8e000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000007300a736aa00000000000187d1f7fdfee7f651fabc8bfcb6e086c278b77a7d01d00700001cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c00286bee000000000000000000000000000064a7b3b6e00d000000000000000000e40b5402000000000000000000000000000000000000000000000000").to_vec(), + 
hex!("f90471822080b9046b02f904670183017d9cb9010000800000000000008000000000000000000000000000004000000000000000000400000000004000000000001000000010000000000000000000001008000000000000000000000001000008000040000000000000000000000000008000080000000000200000000000000000000000000100000000000000000010000000000000020000000000000000000000000000003000000000080018000000000000000000040004000021000000002000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000200820000000000f9035cf89b9487d1f7fdfee7f651fabc8bfcb6e086c278b77a7df863a0ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000057a2d4ff0c3866d96556884bf09fecdd7ccd530ca00000000000000000000000000000000000000000000000000de0b6b3a7640000f9015d94eda338e4dc46038493b885327842fd3e301cab39f884a024c5d2de620c6e25186ae16f6919eba93b6e2c1a33857cc419d9f3a00d6967e9a000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7da000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000000000000000000000000000000000000000007d0b8c000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000201cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07cf9015c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a0be323bced46a1a49c8da2ab62ad5e974fd50f1dabaeed70b23ca5bcf14bfe4aab8e000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000007300a736aa00000000000187d1f7fdfee7f651fabc8bfcb6e086c278b77a7d01d00700001cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c00286bee000000000000000000000000000064a7b3b6e00d000000000000000000e40b5402000000000000000000000000000000000000000000000000").to_vec(), ]), + execution_proof: ExecutionProof { + header: BeaconHeader { + slot: 4235, + proposer_index: 4, + parent_root: hex!("1b31e6264c19bcad120e434e0aede892e7d7c8ed80ab505cb593d9a4a16bc566").into(), + state_root: hex!("725f51771a0ecf72c647a283ab814ca088f998eb8c203181496b0b8e01f624fa").into(), + body_root: hex!("6f1c326d192e7e97e21e27b16fd7f000b8fa09b435ff028849927e382302b0ce").into(), + }, + ancestry_proof: Some(AncestryProof { + header_branch: vec![ + hex!("1b31e6264c19bcad120e434e0aede892e7d7c8ed80ab505cb593d9a4a16bc566").into(), + hex!("335eb186c077fa7053ec96dcc5d34502c997713d2d5bc4eb74842118d8cd5a64").into(), + hex!("326607faf2a7dfc9cfc4b6895f8f3d92a659552deb2c8fd1e892ec00c86c734c").into(), + hex!("4e20002125d7b6504df7c774f3f48e018e1e6762d03489149670a8335bba1425").into(), + hex!("e76af5cd61aade5aec8282b6f1df9046efa756b0466bba5e49032410f7739a1b").into(), + hex!("ee4dcd9527712116380cddafd120484a3bedf867225bbb86850b84decf6da730").into(), + hex!("e4687a07421d3150439a2cd2f09f3b468145d75b359a2e5fa88dfbec51725b15").into(), + hex!("38eaa78978e95759aa9b6f8504a8dbe36151f20ae41907e6a1ea165700ceefcd").into(), + hex!("1c1b071ec6f13e15c47d07d1bfbcc9135d6a6c819e68e7e6078a2007418c1a23").into(), + 
hex!("0b3ad7ad193c691c8c4ba1606ad2a90482cd1d033c7db58cfe739d0e20431e9e").into(), + hex!("ffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b").into(), + hex!("6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220").into(), + hex!("b2ffec5f2c14640305dd941330f09216c53b99d198e93735a400a6d3a4de191f").into(), + ], + finalized_block_root: hex!("08be7a59e947f08cd95c4ef470758730bf9e3b0db0824cb663ea541c39b0e65c").into(), + }), + execution_header: VersionedExecutionPayloadHeader::Deneb(deneb::ExecutionPayloadHeader { + parent_hash: hex!("5d1186ae041f58785edb2f01248e95832f2e5e5d6c4eb8f7ff2f58980bfc2de9").into(), + fee_recipient: hex!("0000000000000000000000000000000000000000").into(), + state_root: hex!("2a66114d20e93082c8e9b47c8d401a937013487d757c9c2f3123cf43dc1f656d").into(), + receipts_root: hex!("106f1eaeac04e469da0020ad5c8a72af66323638bd3f561a3c8236063202c120").into(), + logs_bloom: hex!("00800000000000008000000000000000000000000000004000000000000000000400000000004000000000001000000010000000000000000000001008000000000000000000000001000008000040000000000000000000000000008000080000000000200000000000000000000000000100000000000000000010000000000000020000000000000000000000000000003000000000080018000000000000000000040004000021000000002000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000200820000000000").into(), + prev_randao: hex!("92e063c7e369b74149fdd1d7132ed2f635a19b9d8bff57637b8ee4736576426e").into(), + block_number: 4235, + gas_limit: 30000000, + gas_used: 97692, + timestamp: 1710556655, + extra_data: hex!("d983010d0b846765746888676f312e32312e368664617277696e").into(), + base_fee_per_gas: U256::from(7u64), + block_hash: hex!("ce24fe3047aa20a8f222cd1d04567c12b39455400d681141962c2130e690953f").into(), + transactions_root: hex!("0c8388731de94771777c60d452077065354d90d6e5088db61fc6a134684195cc").into(), + withdrawals_root: hex!("792930bbd5baac43bcc798ee49aa8185ef76bb3b44ba62b91d86ae569e4bb535").into(), + blob_gas_used: 0, + excess_blob_gas: 0, + }), + execution_branch: vec![ + hex!("99d397fa180078e66cd3a3b77bcb07553052f4e21d447167f3a406f663b14e6a").into(), + hex!("b46f0c01805fe212e15907981b757e6c496b0cb06664224655613dcec82505bb").into(), + hex!("db56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71").into(), + hex!("53ddf17147819c1abb918178b0230d965d1bc2c0d389f45e91e54cb1d2d468aa").into(), + ], + } }, }, + finalized_header: BeaconHeader { + slot: 4672, + proposer_index: 4, + parent_root: hex!("951233bf9f4bddfb2fa8f54e3bd0c7883779ef850e13e076baae3130dd7732db").into(), + state_root: hex!("4d303003b8cb097cbcc14b0f551ee70dac42de2c1cc2f4acfca7058ca9713291").into(), + body_root: hex!("664d13952b6f369bf4cf3af74d067ec33616eb57ed3a8a403fd5bae4fbf737dd").into(), + }, + block_roots_root: hex!("af71048297c070e6539cf3b9b90ae07d86d363454606bc239734629e6b49b983").into(), } } diff --git a/bridges/snowbridge/pallets/inbound-queue/src/benchmarking/mod.rs b/bridges/snowbridge/pallets/inbound-queue/src/benchmarking/mod.rs index 931befa2ac6..d59d9275772 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/benchmarking/mod.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/benchmarking/mod.rs @@ -19,8 +19,8 @@ mod benchmarks { let create_message = make_register_token_message(); T::Helper::initialize_storage( - create_message.message.proof.block_hash, - create_message.execution_header, + create_message.finalized_header, + create_message.block_roots_root, ); let sovereign_account = 
sibling_sovereign_account::<T>(1000u32.into());
diff --git a/bridges/snowbridge/pallets/inbound-queue/src/lib.rs b/bridges/snowbridge/pallets/inbound-queue/src/lib.rs
index bdc21fcf037..8acbb0c2916 100644
--- a/bridges/snowbridge/pallets/inbound-queue/src/lib.rs
+++ b/bridges/snowbridge/pallets/inbound-queue/src/lib.rs
@@ -28,9 +28,6 @@ mod envelope;
 #[cfg(feature = "runtime-benchmarks")]
 mod benchmarking;
 
-#[cfg(feature = "runtime-benchmarks")]
-use snowbridge_beacon_primitives::CompactExecutionHeader;
-
 pub mod weights;
 
 #[cfg(test)]
@@ -44,7 +41,7 @@ use envelope::Envelope;
 use frame_support::{
 	traits::{
 		fungible::{Inspect, Mutate},
-		tokens::Preservation,
+		tokens::{Fortitude, Preservation},
 	},
 	weights::WeightToFee,
 	PalletError,
@@ -52,6 +49,7 @@ use frame_system::ensure_signed;
 use scale_info::TypeInfo;
 use sp_core::{H160, H256};
+use sp_runtime::traits::Zero;
 use sp_std::{convert::TryFrom, vec};
 use xcm::prelude::{
 	send_xcm, Instruction::SetTopic, Junction::*, Location, SendError as XcmpSendError, SendXcm,
@@ -72,6 +70,9 @@ use sp_runtime::{traits::Saturating, SaturatedConversion, TokenError};
 
 pub use weights::WeightInfo;
 
+#[cfg(feature = "runtime-benchmarks")]
+use snowbridge_beacon_primitives::BeaconHeader;
+
 type BalanceOf<T> =
 	<<T as pallet::Config>::Token as Inspect<<T as frame_system::Config>::AccountId>>::Balance;
 
@@ -91,7 +92,7 @@ pub mod pallet {
 
 	#[cfg(feature = "runtime-benchmarks")]
 	pub trait BenchmarkHelper<T> {
-		fn initialize_storage(block_hash: H256, header: CompactExecutionHeader);
+		fn initialize_storage(beacon_header: BeaconHeader, block_roots_root: H256);
 	}
 
 	#[pallet::config]
@@ -261,11 +262,19 @@ pub mod pallet {
 				}
 			})?;
 
-			// Reward relayer from the sovereign account of the destination parachain
-			// Expected to fail if sovereign account has no funds
+			// Reward relayer from the sovereign account of the destination parachain, only if funds
+			// are available
 			let sovereign_account = sibling_sovereign_account::<T>(channel.para_id);
 			let delivery_cost = Self::calculate_delivery_cost(message.encode().len() as u32);
-			T::Token::transfer(&sovereign_account, &who, delivery_cost, Preservation::Preserve)?;
+			let amount = T::Token::reducible_balance(
+				&sovereign_account,
+				Preservation::Preserve,
+				Fortitude::Polite,
+			)
+			.min(delivery_cost);
+			if !amount.is_zero() {
+				T::Token::transfer(&sovereign_account, &who, amount, Preservation::Preserve)?;
+			}
 
 			// Decode message into XCM
 			let (xcm, fee) =
diff --git a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs
index 39e9532ed32..c96c868bc26 100644
--- a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs
+++ b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs
@@ -4,11 +4,13 @@ use super::*;
 use frame_support::{
 	derive_impl, parameter_types,
-	traits::{ConstU128, ConstU32, Everything},
+	traits::{ConstU32, Everything},
 	weights::IdentityFee,
 };
 use hex_literal::hex;
-use snowbridge_beacon_primitives::{Fork, ForkVersions};
+use snowbridge_beacon_primitives::{
+	types::deneb, BeaconHeader, ExecutionProof, Fork, ForkVersions, VersionedExecutionPayloadHeader,
+};
 use snowbridge_core::{
 	gwei,
 	inbound::{Log, Proof, VerificationError},
@@ -20,7 +22,7 @@ use sp_runtime::{
 	traits::{BlakeTwo256, IdentifyAccount, IdentityLookup, Verify},
 	BuildStorage, FixedU128, MultiSignature,
 };
-use sp_std::convert::From;
+use sp_std::{convert::From, default::Default};
 use xcm::{latest::SendXcm, prelude::*};
 use xcm_executor::AssetsInHolding;
 
@@ -65,6 +67,10 @@ impl frame_system::Config for Test {
 	type Block = Block;
 }
 
+parameter_types! {
+	pub const ExistentialDeposit: u128 = 1;
+}
+
 impl pallet_balances::Config for Test {
 	type MaxLocks = ();
 	type MaxReserves = ();
@@ -72,7 +78,7 @@ impl pallet_balances::Config for Test {
 	type Balance = Balance;
 	type RuntimeEvent = RuntimeEvent;
 	type DustRemoval = ();
-	type ExistentialDeposit = ConstU128<1>;
+	type ExistentialDeposit = ExistentialDeposit;
 	type AccountStore = System;
 	type WeightInfo = ();
 	type FreezeIdentifier = ();
@@ -82,7 +88,6 @@
 }
 
 parameter_types! {
-	pub const ExecutionHeadersPruneThreshold: u32 = 10;
 	pub const ChainForkVersions: ForkVersions = ForkVersions{
 		genesis: Fork {
 			version: [0, 0, 0, 1], // 0x00000001
@@ -110,7 +115,6 @@ parameter_types! {
 impl snowbridge_pallet_ethereum_client::Config for Test {
 	type RuntimeEvent = RuntimeEvent;
 	type ForkVersions = ChainForkVersions;
-	type MaxExecutionHeadersToKeep = ExecutionHeadersPruneThreshold;
 	type WeightInfo = ();
 }
 
@@ -139,7 +143,7 @@ parameter_types! {
 #[cfg(feature = "runtime-benchmarks")]
 impl BenchmarkHelper<Test> for Test {
 	// not implemented since the MockVerifier is used for tests
-	fn initialize_storage(_: H256, _: CompactExecutionHeader) {}
+	fn initialize_storage(_: BeaconHeader, _: H256) {}
 }
 
 // Mock XCM sender that always succeeds
@@ -335,5 +339,32 @@ pub fn mock_event_log_invalid_gateway() -> Log {
 	}
 }
 
+pub fn mock_execution_proof() -> ExecutionProof {
+	ExecutionProof {
+		header: BeaconHeader::default(),
+		ancestry_proof: None,
+		execution_header: VersionedExecutionPayloadHeader::Deneb(deneb::ExecutionPayloadHeader {
+			parent_hash: Default::default(),
+			fee_recipient: Default::default(),
+			state_root: Default::default(),
+			receipts_root: Default::default(),
+			logs_bloom: vec![],
+			prev_randao: Default::default(),
+			block_number: 0,
+			gas_limit: 0,
+			gas_used: 0,
+			timestamp: 0,
+			extra_data: vec![],
+			base_fee_per_gas: Default::default(),
+			block_hash: Default::default(),
+			transactions_root: Default::default(),
+			withdrawals_root: Default::default(),
+			blob_gas_used: 0,
+			excess_blob_gas: 0,
+		}),
+		execution_branch: vec![],
+	}
+}
+
 pub const ASSET_HUB_PARAID: u32 = 1000u32;
 pub const TEMPLATE_PARAID: u32 = 1001u32;
diff --git a/bridges/snowbridge/pallets/inbound-queue/src/test.rs b/bridges/snowbridge/pallets/inbound-queue/src/test.rs
index 9a47e475b8c..bd993c968df 100644
--- a/bridges/snowbridge/pallets/inbound-queue/src/test.rs
+++ b/bridges/snowbridge/pallets/inbound-queue/src/test.rs
@@ -6,7 +6,7 @@ use frame_support::{assert_noop, assert_ok};
 use hex_literal::hex;
 use snowbridge_core::{inbound::Proof, ChannelId};
 use sp_keyring::AccountKeyring as Keyring;
-use sp_runtime::{DispatchError, TokenError};
+use sp_runtime::DispatchError;
 use sp_std::convert::From;
 
 use crate::{Error, Event as InboundQueueEvent};
@@ -25,9 +25,8 @@ fn test_submit_happy_path() {
 	let message = Message {
 		event_log: mock_event_log(),
 		proof: Proof {
-			block_hash: Default::default(),
-			tx_index: Default::default(),
-			data: Default::default(),
+			receipt_proof: Default::default(),
+			execution_proof: mock_execution_proof(),
 		},
 	};
 
@@ -77,9 +76,8 @@ fn test_submit_xcm_invalid_channel() {
 	let message = Message {
 		event_log: mock_event_log_invalid_channel(),
 		proof: Proof {
-			block_hash: Default::default(),
-			tx_index: Default::default(),
-			data: Default::default(),
+			receipt_proof: Default::default(),
+			execution_proof: mock_execution_proof(),
 		},
 	};
 	assert_noop!(
@@ -103,9 +101,8 @@ fn test_submit_with_invalid_gateway() {
 	let message = Message {
 		event_log: mock_event_log_invalid_gateway(),
 		proof: Proof {
-			block_hash: Default::default(),
-			tx_index: Default::default(),
-			data: Default::default(),
+			receipt_proof: Default::default(),
+			execution_proof: mock_execution_proof(),
 		},
 	};
 	assert_noop!(
@@ -129,9 +126,8 @@ fn test_submit_with_invalid_nonce() {
 	let message = Message {
 		event_log: mock_event_log(),
 		proof: Proof {
-			block_hash: Default::default(),
-			tx_index: Default::default(),
-			data: Default::default(),
+			receipt_proof: Default::default(),
+			execution_proof: mock_execution_proof(),
 		},
 	};
 	assert_ok!(InboundQueue::submit(origin.clone(), message.clone()));
@@ -150,12 +146,12 @@ fn test_submit_with_invalid_nonce() {
 }
 
 #[test]
-fn test_submit_no_funds_to_reward_relayers() {
+fn test_submit_no_funds_to_reward_relayers_just_ignore() {
 	new_tester().execute_with(|| {
 		let relayer: AccountId = Keyring::Bob.into();
 		let origin = RuntimeOrigin::signed(relayer);
 
-		// Reset balance of sovereign_account to zero so to trigger the FundsUnavailable error
+		// Reset balance of sovereign_account to zero first
 		let sovereign_account = sibling_sovereign_account::<Test>(ASSET_HUB_PARAID.into());
 		Balances::set_balance(&sovereign_account, 0);
 
@@ -163,15 +159,12 @@
 		let message = Message {
 			event_log: mock_event_log(),
 			proof: Proof {
-				block_hash: Default::default(),
-				tx_index: Default::default(),
-				data: Default::default(),
+				receipt_proof: Default::default(),
+				execution_proof: mock_execution_proof(),
 			},
 		};
-		assert_noop!(
-			InboundQueue::submit(origin.clone(), message.clone()),
-			TokenError::FundsUnavailable
-		);
+		// Check that the submission succeeds even though no funds are available
+		assert_ok!(InboundQueue::submit(origin.clone(), message.clone()));
	});
 }
 
@@ -183,9 +176,8 @@ fn test_set_operating_mode() {
 	let message = Message {
 		event_log: mock_event_log(),
 		proof: Proof {
-			block_hash: Default::default(),
-			tx_index: Default::default(),
-			data: Default::default(),
+			receipt_proof: Default::default(),
+			execution_proof: mock_execution_proof(),
 		},
 	};
 
@@ -210,3 +202,44 @@ fn test_set_operating_mode_root_only() {
 	);
 	});
 }
+
+#[test]
+fn test_submit_no_funds_to_reward_relayers_and_ed_preserved() {
+	new_tester().execute_with(|| {
+		let relayer: AccountId = Keyring::Bob.into();
+		let origin = RuntimeOrigin::signed(relayer);
+
+		// Reset balance of sovereign account to (ED+1) first
+		let sovereign_account = sibling_sovereign_account::<Test>(ASSET_HUB_PARAID.into());
+		Balances::set_balance(&sovereign_account, ExistentialDeposit::get() + 1);
+
+		// Submit message successfully
+		let message = Message {
+			event_log: mock_event_log(),
+			proof: Proof {
+				receipt_proof: Default::default(),
+				execution_proof: mock_execution_proof(),
+			},
+		};
+		assert_ok!(InboundQueue::submit(origin.clone(), message.clone()));
+
+		// Check that the sovereign account balance has dropped to ED
+		let amount = Balances::balance(&sovereign_account);
+		assert_eq!(amount, ExistentialDeposit::get());
+
+		// Submit another message with the nonce set to 2
+		let mut event_log = mock_event_log();
+		event_log.data[31] = 2;
+		let message = Message {
+			event_log,
+			proof: Proof {
+				receipt_proof: Default::default(),
+				execution_proof: mock_execution_proof(),
+			},
+		};
+		assert_ok!(InboundQueue::submit(origin.clone(), message.clone()));
+		// Check that the sovereign account balance stays at ED
+		let amount = Balances::balance(&sovereign_account);
+		assert_eq!(amount, ExistentialDeposit::get());
+	});
+}
diff --git a/bridges/snowbridge/primitives/beacon/src/lib.rs
b/bridges/snowbridge/primitives/beacon/src/lib.rs index 4c569d0176c..6579d0f6096 100644 --- a/bridges/snowbridge/primitives/beacon/src/lib.rs +++ b/bridges/snowbridge/primitives/beacon/src/lib.rs @@ -15,12 +15,12 @@ pub mod updates; mod serde_utils; pub use types::{ - BeaconHeader, CompactBeaconState, CompactExecutionHeader, ExecutionHeaderState, - ExecutionPayloadHeader, FinalizedHeaderState, Fork, ForkData, ForkVersion, ForkVersions, Mode, - PublicKey, Signature, SigningData, SyncAggregate, SyncCommittee, SyncCommitteePrepared, + AncestryProof, BeaconHeader, CompactBeaconState, ExecutionPayloadHeader, ExecutionProof, + FinalizedHeaderState, Fork, ForkData, ForkVersion, ForkVersions, Mode, PublicKey, Signature, + SigningData, SyncAggregate, SyncCommittee, SyncCommitteePrepared, VersionedExecutionPayloadHeader, }; -pub use updates::{CheckpointUpdate, ExecutionHeaderUpdate, NextSyncCommitteeUpdate, Update}; +pub use updates::{CheckpointUpdate, NextSyncCommitteeUpdate, Update}; pub use bits::decompress_sync_committee_bits; pub use bls::{ diff --git a/bridges/snowbridge/primitives/beacon/src/types.rs b/bridges/snowbridge/primitives/beacon/src/types.rs index 2af522f56b0..e12350510c9 100644 --- a/bridges/snowbridge/primitives/beacon/src/types.rs +++ b/bridges/snowbridge/primitives/beacon/src/types.rs @@ -110,14 +110,6 @@ impl<'de> Deserialize<'de> for Signature { } } -#[derive(Copy, Clone, Default, Encode, Decode, TypeInfo, MaxEncodedLen)] -pub struct ExecutionHeaderState { - pub beacon_block_root: H256, - pub beacon_slot: u64, - pub block_hash: H256, - pub block_number: u64, -} - #[derive(Copy, Clone, Default, Encode, Decode, TypeInfo, MaxEncodedLen)] pub struct FinalizedHeaderState { pub beacon_block_root: H256, @@ -346,35 +338,6 @@ impl ExecutionPayloadHeader { } } -#[derive( - Default, - Encode, - Decode, - CloneNoBound, - PartialEqNoBound, - RuntimeDebugNoBound, - TypeInfo, - MaxEncodedLen, -)] -pub struct CompactExecutionHeader { - pub parent_hash: H256, - #[codec(compact)] - pub block_number: u64, - pub state_root: H256, - pub receipts_root: H256, -} - -impl From for CompactExecutionHeader { - fn from(execution_payload: ExecutionPayloadHeader) -> Self { - Self { - parent_hash: execution_payload.parent_hash, - block_number: execution_payload.block_number, - state_root: execution_payload.state_root, - receipts_root: execution_payload.receipts_root, - } - } -} - #[derive( Default, Encode, @@ -405,18 +368,6 @@ pub enum VersionedExecutionPayloadHeader { Deneb(deneb::ExecutionPayloadHeader), } -/// Convert VersionedExecutionPayloadHeader to CompactExecutionHeader -impl From for CompactExecutionHeader { - fn from(versioned_execution_header: VersionedExecutionPayloadHeader) -> Self { - match versioned_execution_header { - VersionedExecutionPayloadHeader::Capella(execution_payload_header) => - execution_payload_header.into(), - VersionedExecutionPayloadHeader::Deneb(execution_payload_header) => - execution_payload_header.into(), - } - } -} - impl VersionedExecutionPayloadHeader { pub fn hash_tree_root(&self) -> Result { match self { @@ -448,6 +399,45 @@ impl VersionedExecutionPayloadHeader { execution_payload_header.block_number, } } + + pub fn receipts_root(&self) -> H256 { + match self { + VersionedExecutionPayloadHeader::Capella(execution_payload_header) => + execution_payload_header.receipts_root, + VersionedExecutionPayloadHeader::Deneb(execution_payload_header) => + execution_payload_header.receipts_root, + } + } +} + +#[derive(Encode, Decode, CloneNoBound, PartialEqNoBound, 
RuntimeDebugNoBound, TypeInfo)]
+#[cfg_attr(
+	feature = "std",
+	derive(serde::Deserialize),
+	serde(deny_unknown_fields, bound(serialize = ""), bound(deserialize = ""))
+)]
+pub struct ExecutionProof {
+	/// Header for the beacon block containing the execution payload
+	pub header: BeaconHeader,
+	/// Proof that `header` is an ancestor of a finalized header
+	pub ancestry_proof: Option<AncestryProof>,
+	/// The execution header to be verified
+	pub execution_header: VersionedExecutionPayloadHeader,
+	/// Merkle proof that execution payload is contained within `header`
+	pub execution_branch: Vec<H256>,
+}
+
+#[derive(Encode, Decode, CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound, TypeInfo)]
+#[cfg_attr(
+	feature = "std",
+	derive(serde::Deserialize),
+	serde(deny_unknown_fields, bound(serialize = ""), bound(deserialize = ""))
+)]
+pub struct AncestryProof {
+	/// Merkle proof that `header` is an ancestor of `finalized_header`
+	pub header_branch: Vec<H256>,
+	/// Root of a finalized block that has already been imported into the light client
+	pub finalized_block_root: H256,
 }
 
 #[cfg(test)]
@@ -576,7 +566,6 @@ pub enum Mode {
 }
 
 pub mod deneb {
-	use crate::CompactExecutionHeader;
 	use codec::{Decode, Encode};
 	use frame_support::{CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound};
 	use scale_info::TypeInfo;
@@ -627,15 +616,4 @@ pub mod deneb {
 		pub blob_gas_used: u64,   // [New in Deneb:EIP4844]
 		pub excess_blob_gas: u64, // [New in Deneb:EIP4844]
 	}
-
-	impl From<ExecutionPayloadHeader> for CompactExecutionHeader {
-		fn from(execution_payload: ExecutionPayloadHeader) -> Self {
-			Self {
-				parent_hash: execution_payload.parent_hash,
-				block_number: execution_payload.block_number,
-				state_root: execution_payload.state_root,
-				receipts_root: execution_payload.receipts_root,
-			}
-		}
-	}
 }
diff --git a/bridges/snowbridge/primitives/beacon/src/updates.rs b/bridges/snowbridge/primitives/beacon/src/updates.rs
index 1ecd32c6d7b..ca651b5806f 100644
--- a/bridges/snowbridge/primitives/beacon/src/updates.rs
+++ b/bridges/snowbridge/primitives/beacon/src/updates.rs
@@ -6,7 +6,7 @@ use scale_info::TypeInfo;
 use sp_core::H256;
 use sp_std::prelude::*;
 
-use crate::types::{BeaconHeader, SyncAggregate, SyncCommittee, VersionedExecutionPayloadHeader};
+use crate::types::{BeaconHeader, SyncAggregate, SyncCommittee};
 
 #[derive(Encode, Decode, CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound, TypeInfo)]
 #[cfg_attr(
@@ -23,26 +23,13 @@ pub struct CheckpointUpdate {
 	pub block_roots_branch: Vec<H256>,
 }
 
-impl Default for CheckpointUpdate {
-	fn default() -> Self {
-		CheckpointUpdate {
-			header: Default::default(),
-			current_sync_committee: Default::default(),
-			current_sync_committee_branch: Default::default(),
-			validators_root: Default::default(),
-			block_roots_root: Default::default(),
-			block_roots_branch: Default::default(),
-		}
-	}
-}
-
 #[derive(
 	Default, Encode, Decode, CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound, TypeInfo,
 )]
 #[cfg_attr(
 	feature = "std",
 	derive(serde::Deserialize),
-	serde(deny_unknown_fields, bound(serialize = ""), bound(deserialize = ""))
+	serde(bound(serialize = ""), bound(deserialize = ""))
 )]
 pub struct Update {
 	/// A recent header attesting to the finalized header, using its `state_root`.
@@ -78,33 +65,3 @@ pub struct NextSyncCommitteeUpdate { pub next_sync_committee: SyncCommittee, pub next_sync_committee_branch: Vec, } - -#[derive(Encode, Decode, CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound, TypeInfo)] -#[cfg_attr( - feature = "std", - derive(serde::Deserialize), - serde(deny_unknown_fields, bound(serialize = ""), bound(deserialize = "")) -)] -pub struct ExecutionHeaderUpdate { - /// Header for the beacon block containing the execution payload - pub header: BeaconHeader, - /// Proof that `header` is an ancestor of a finalized header - pub ancestry_proof: Option, - /// Execution header to be imported - pub execution_header: VersionedExecutionPayloadHeader, - /// Merkle proof that execution payload is contained within `header` - pub execution_branch: Vec, -} - -#[derive(Encode, Decode, CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound, TypeInfo)] -#[cfg_attr( - feature = "std", - derive(serde::Deserialize), - serde(deny_unknown_fields, bound(serialize = ""), bound(deserialize = "")) -)] -pub struct AncestryProof { - /// Merkle proof that `header` is an ancestor of `finalized_header` - pub header_branch: Vec, - /// Root of a finalized block that has already been imported into the light client - pub finalized_block_root: H256, -} diff --git a/bridges/snowbridge/primitives/core/src/inbound.rs b/bridges/snowbridge/primitives/core/src/inbound.rs index 4b04470ad02..9e8ed789ab5 100644 --- a/bridges/snowbridge/primitives/core/src/inbound.rs +++ b/bridges/snowbridge/primitives/core/src/inbound.rs @@ -5,6 +5,7 @@ use codec::{Decode, Encode}; use frame_support::PalletError; use scale_info::TypeInfo; +use snowbridge_beacon_primitives::{BeaconHeader, ExecutionProof}; use sp_core::{H160, H256}; use sp_runtime::RuntimeDebug; use sp_std::vec::Vec; @@ -25,6 +26,8 @@ pub enum VerificationError { InvalidLog, /// Unable to verify the transaction receipt with the provided proof InvalidProof, + /// Unable to verify the execution header with ancestry proof + InvalidExecutionProof(#[codec(skip)] &'static str), } pub type MessageNonce = u64; @@ -65,10 +68,15 @@ impl Log { /// Inclusion proof for a transaction receipt #[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] pub struct Proof { - // The block hash of the block in which the receipt was included. - pub block_hash: H256, - // The index of the transaction (and receipt) within the block. 
- pub tx_index: u32, // Proof keys and values (receipts tree) - pub data: (Vec>, Vec>), + pub receipt_proof: (Vec>, Vec>), + // Proof that an execution header was finalized by the beacon chain + pub execution_proof: ExecutionProof, +} + +#[derive(Clone, RuntimeDebug)] +pub struct InboundQueueFixture { + pub message: Message, + pub finalized_header: BeaconHeader, + pub block_roots_root: H256, } diff --git a/bridges/snowbridge/runtime/test-common/src/lib.rs b/bridges/snowbridge/runtime/test-common/src/lib.rs index 7455adf7617..3e2de0e481b 100644 --- a/bridges/snowbridge/runtime/test-common/src/lib.rs +++ b/bridges/snowbridge/runtime/test-common/src/lib.rs @@ -467,7 +467,6 @@ pub fn ethereum_extrinsic( let initial_checkpoint = make_checkpoint(); let update = make_finalized_header_update(); let sync_committee_update = make_sync_committee_update(); - let execution_header_update = make_execution_header_update(); let alice = Alice; let alice_account = alice.to_account_id(); @@ -494,22 +493,12 @@ pub fn ethereum_extrinsic( } .into(); - let execution_header_call: ::RuntimeCall = - snowbridge_pallet_ethereum_client::Call::::submit_execution_header { - update: Box::new(*execution_header_update), - } - .into(); - let update_outcome = construct_and_apply_extrinsic(alice, update_call.into()); assert_ok!(update_outcome); let sync_committee_outcome = construct_and_apply_extrinsic(alice, update_sync_committee_call.into()); assert_ok!(sync_committee_outcome); - - let execution_header_outcome = - construct_and_apply_extrinsic(alice, execution_header_call.into()); - assert_ok!(execution_header_outcome); }); } @@ -548,7 +537,6 @@ pub fn ethereum_to_polkadot_message_extrinsics_work( .execute_with(|| { let initial_checkpoint = make_checkpoint(); let sync_committee_update = make_sync_committee_update(); - let execution_header_update = make_execution_header_update(); let alice = Alice; let alice_account = alice.to_account_id(); @@ -569,18 +557,8 @@ pub fn ethereum_to_polkadot_message_extrinsics_work( } .into(); - let execution_header_call: ::RuntimeCall = - snowbridge_pallet_ethereum_client::Call::::submit_execution_header { - update: Box::new(*execution_header_update), - } - .into(); - let sync_committee_outcome = construct_and_apply_extrinsic(alice, update_sync_committee_call.into()); assert_ok!(sync_committee_outcome); - - let execution_header_outcome = - construct_and_apply_extrinsic(alice, execution_header_call.into()); - assert_ok!(execution_header_outcome); }); } diff --git a/bridges/snowbridge/scripts/contribute-upstream.sh b/bridges/snowbridge/scripts/contribute-upstream.sh index 32005b770ec..529057c3f26 100755 --- a/bridges/snowbridge/scripts/contribute-upstream.sh +++ b/bridges/snowbridge/scripts/contribute-upstream.sh @@ -79,4 +79,10 @@ git fetch parity master git checkout parity/master -- .github git add -- .github +git commit -m "cleanup branch" + +# Fetch the latest from parity master +echo "Fetching latest from Parity master. Resolve merge conflicts, if there are any." 
+git fetch parity master +git merge parity/master echo "OK" diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs index 1804f9d4b67..780ba57f78a 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs @@ -20,17 +20,17 @@ use frame_support::pallet_prelude::TypeInfo; use hex_literal::hex; use rococo_system_emulated_network::penpal_emulated_chain::CustomizableAssetFromSystemAssetHub; use rococo_westend_system_emulated_network::BridgeHubRococoParaSender as BridgeHubRococoSender; -use snowbridge_core::outbound::OperatingMode; +use snowbridge_core::{inbound::InboundQueueFixture, outbound::OperatingMode}; use snowbridge_pallet_inbound_queue_fixtures::{ - register_token::make_register_token_message, - register_token_with_insufficient_fee::make_register_token_with_insufficient_fee_message, - send_token::make_send_token_message, send_token_to_penpal::make_send_token_to_penpal_message, - InboundQueueFixture, + register_token::make_register_token_message, send_token::make_send_token_message, + send_token_to_penpal::make_send_token_to_penpal_message, }; use snowbridge_pallet_system; -use snowbridge_router_primitives::inbound::GlobalConsensusEthereumConvertsFor; +use snowbridge_router_primitives::inbound::{ + Command, GlobalConsensusEthereumConvertsFor, MessageV1, VersionedMessage, +}; use sp_core::H256; -use sp_runtime::{ArithmeticError::Underflow, DispatchError::Arithmetic}; +use sp_runtime::{DispatchError::Token, TokenError::FundsUnavailable}; use testnet_parachains_constants::rococo::snowbridge::EthereumNetwork; const INITIAL_FUND: u128 = 5_000_000_000 * ROCOCO_ED; @@ -39,6 +39,7 @@ const TREASURY_ACCOUNT: [u8; 32] = hex!("6d6f646c70792f74727372790000000000000000000000000000000000000000"); const WETH: [u8; 20] = hex!("87d1f7fdfEe7f651FaBc8bFCB6E086C278b77A7d"); const ETHEREUM_DESTINATION_ADDRESS: [u8; 20] = hex!("44a57ee2f2FCcb85FDa2B0B18EBD0D8D2333700e"); +const INSUFFICIENT_XCM_FEE: u128 = 1000; #[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] pub enum ControlCall { @@ -56,13 +57,11 @@ pub enum SnowbridgeControl { } pub fn send_inbound_message(fixture: InboundQueueFixture) -> DispatchResult { - EthereumBeaconClient::store_execution_header( - fixture.message.proof.block_hash, - fixture.execution_header, - 0, - H256::default(), - ); - + EthereumBeaconClient::store_finalized_header( + fixture.finalized_header, + fixture.block_roots_root, + ) + .unwrap(); EthereumInboundQueue::submit( RuntimeOrigin::signed(BridgeHubRococoSender::get()), fixture.message, @@ -237,6 +236,46 @@ fn register_weth_token_from_ethereum_to_asset_hub() { }); } +/// Tests the registering of a token as an asset on AssetHub, and then subsequently sending +/// a token from Ethereum to AssetHub. 
+#[test]
+fn send_token_from_ethereum_to_asset_hub() {
+	BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id().into(), INITIAL_FUND);
+
+	// Fund ethereum sovereign on AssetHub
+	AssetHubRococo::fund_accounts(vec![(AssetHubRococoReceiver::get(), INITIAL_FUND)]);
+
+	BridgeHubRococo::execute_with(|| {
+		type RuntimeEvent = <BridgeHubRococo as Chain>::RuntimeEvent;
+
+		// Construct RegisterToken message and send it to the inbound queue
+		send_inbound_message(make_register_token_message()).unwrap();
+
+		// Construct SendToken message and send it to the inbound queue
+		send_inbound_message(make_send_token_message()).unwrap();
+
+		// Check that the message was sent
+		assert_expected_events!(
+			BridgeHubRococo,
+			vec![
+				RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) => {},
+			]
+		);
+	});
+
+	AssetHubRococo::execute_with(|| {
+		type RuntimeEvent = <AssetHubRococo as Chain>::RuntimeEvent;
+
+		// Check that the token was received and issued as a foreign asset on AssetHub
+		assert_expected_events!(
+			AssetHubRococo,
+			vec![
+				RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { .. }) => {},
+			]
+		);
+	});
+}
+
 /// Tests sending a token to a 3rd party parachain, called PenPal. The token reserve is
 /// still located on AssetHub.
 #[test]
@@ -296,6 +335,10 @@ fn send_token_from_ethereum_to_penpal() {
 		// Construct RegisterToken message and sent to inbound queue
 		send_inbound_message(make_register_token_message()).unwrap();
 
+		// Construct a SendToken message to AssetHub (only to increase the nonce, keeping the same
+		// order as in the smoke test)
+		send_inbound_message(make_send_token_message()).unwrap();
+
 		// Construct SendToken message and sent to inbound queue
 		send_inbound_message(make_send_token_to_penpal_message()).unwrap();
 
@@ -331,46 +374,6 @@ fn send_token_from_ethereum_to_penpal() {
 	});
 }
 
-/// Tests the registering of a token as an asset on AssetHub, and then subsequently sending
-/// a token from Ethereum to AssetHub.
-#[test]
-fn send_token_from_ethereum_to_asset_hub() {
-	BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id().into(), INITIAL_FUND);
-
-	// Fund ethereum sovereign on AssetHub
-	AssetHubRococo::fund_accounts(vec![(AssetHubRococoReceiver::get(), INITIAL_FUND)]);
-
-	BridgeHubRococo::execute_with(|| {
-		type RuntimeEvent = <BridgeHubRococo as Chain>::RuntimeEvent;
-
-		// Construct RegisterToken message and sent to inbound queue
-		send_inbound_message(make_register_token_message()).unwrap();
-
-		// Construct SendToken message and sent to inbound queue
-		send_inbound_message(make_send_token_message()).unwrap();
-
-		// Check that the message was sent
-		assert_expected_events!(
-			BridgeHubRococo,
-			vec![
-				RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) => {},
-			]
-		);
-	});
-
-	AssetHubRococo::execute_with(|| {
-		type RuntimeEvent = <AssetHubRococo as Chain>::RuntimeEvent;
-
-		// Check that the token was received and issued as a foreign asset on AssetHub
-		assert_expected_events!(
-			AssetHubRococo,
-			vec![
-				RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { ..
}) => {}, - ] - ); - }); -} - /// Tests the full cycle of token transfers: /// - registering a token on AssetHub /// - sending a token to AssetHub @@ -507,16 +510,35 @@ fn send_weth_asset_from_asset_hub_to_ethereum() { }); } +#[test] +fn send_token_from_ethereum_to_asset_hub_fail_for_insufficient_fund() { + // Insufficient fund + BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id().into(), 1_000); + + BridgeHubRococo::execute_with(|| { + assert_err!(send_inbound_message(make_register_token_message()), Token(FundsUnavailable)); + }); +} + #[test] fn register_weth_token_in_asset_hub_fail_for_insufficient_fee() { BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id().into(), INITIAL_FUND); BridgeHubRococo::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; - - // Construct RegisterToken message and sent to inbound queue - let message = make_register_token_with_insufficient_fee_message(); - send_inbound_message(message).unwrap(); + type EthereumInboundQueue = + ::EthereumInboundQueue; + let message_id: H256 = [0; 32].into(); + let message = VersionedMessage::V1(MessageV1 { + chain_id: CHAIN_ID, + command: Command::RegisterToken { + token: WETH.into(), + // Insufficient fee which should trigger the trap + fee: INSUFFICIENT_XCM_FEE, + }, + }); + let (xcm, _) = EthereumInboundQueue::do_convert(message_id, message).unwrap(); + let _ = EthereumInboundQueue::send_xcm(xcm, AssetHubRococo::para_id().into()).unwrap(); assert_expected_events!( BridgeHubRococo, @@ -537,13 +559,3 @@ fn register_weth_token_in_asset_hub_fail_for_insufficient_fee() { ); }); } - -#[test] -fn send_token_from_ethereum_to_asset_hub_fail_for_insufficient_fund() { - // Insufficient fund - BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id().into(), 1_000); - - BridgeHubRococo::execute_with(|| { - assert_err!(send_inbound_message(make_register_token_message()), Arithmetic(Underflow)); - }); -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs index 47c3ed36888..f4ff985e277 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs @@ -286,8 +286,8 @@ impl Contains for SafeCallFilter { match call { RuntimeCall::System(frame_system::Call::set_storage { items }) if items.iter().all(|(k, _)| { - k.eq(&bridging::XcmBridgeHubRouterByteFee::key()) | - k.eq(&bridging::XcmBridgeHubRouterBaseFee::key()) | + k.eq(&bridging::XcmBridgeHubRouterByteFee::key()) || + k.eq(&bridging::XcmBridgeHubRouterBaseFee::key()) || k.eq(&bridging::to_ethereum::BridgeHubEthereumBaseFee::key()) }) => return true, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index f0aa4f8e91c..fd6d44ec275 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -100,8 +100,6 @@ use parachains_common::{ AVERAGE_ON_INITIALIZE_RATIO, NORMAL_DISPATCH_RATIO, }; -use polkadot_runtime_common::prod_or_fast; - #[cfg(feature = "runtime-benchmarks")] use benchmark_helpers::DoNothingRouter; @@ -515,14 +513,14 @@ parameter_types! 
{ pub mod benchmark_helpers { use crate::{EthereumBeaconClient, Runtime, RuntimeOrigin}; use codec::Encode; - use snowbridge_beacon_primitives::CompactExecutionHeader; + use snowbridge_beacon_primitives::BeaconHeader; use snowbridge_pallet_inbound_queue::BenchmarkHelper; use sp_core::H256; use xcm::latest::{Assets, Location, SendError, SendResult, SendXcm, Xcm, XcmHash}; impl BenchmarkHelper for Runtime { - fn initialize_storage(block_hash: H256, header: CompactExecutionHeader) { - EthereumBeaconClient::store_execution_header(block_hash, header, 0, H256::default()) + fn initialize_storage(beacon_header: BeaconHeader, block_roots_root: H256) { + EthereumBeaconClient::store_finalized_header(beacon_header, block_roots_root).unwrap(); } } @@ -643,14 +641,9 @@ parameter_types! { }; } -parameter_types! { - pub const MaxExecutionHeadersToKeep: u32 = prod_or_fast!(8192 * 2, 1000); -} - impl snowbridge_pallet_ethereum_client::Config for Runtime { type RuntimeEvent = RuntimeEvent; type ForkVersions = ChainForkVersions; - type MaxExecutionHeadersToKeep = MaxExecutionHeadersToKeep; type WeightInfo = weights::snowbridge_pallet_ethereum_client::WeightInfo; } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_pallet_ethereum_client.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_pallet_ethereum_client.rs index 0d5f29c6ff2..c8017939b62 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_pallet_ethereum_client.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_pallet_ethereum_client.rs @@ -126,26 +126,4 @@ impl snowbridge_pallet_ethereum_client::WeightInfo for .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: EthereumBeaconClient LatestFinalizedBlockRoot (r:1 w:0) - /// Proof: EthereumBeaconClient LatestFinalizedBlockRoot (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: EthereumBeaconClient FinalizedBeaconState (r:1 w:0) - /// Proof: EthereumBeaconClient FinalizedBeaconState (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: EthereumBeaconClient LatestExecutionState (r:1 w:1) - /// Proof: EthereumBeaconClient LatestExecutionState (max_values: Some(1), max_size: Some(80), added: 575, mode: MaxEncodedLen) - /// Storage: EthereumBeaconClient ExecutionHeaderIndex (r:1 w:1) - /// Proof: EthereumBeaconClient ExecutionHeaderIndex (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: EthereumBeaconClient ExecutionHeaderMapping (r:1 w:1) - /// Proof: EthereumBeaconClient ExecutionHeaderMapping (max_values: None, max_size: Some(36), added: 2511, mode: MaxEncodedLen) - /// Storage: EthereumBeaconClient ExecutionHeaders (r:0 w:1) - /// Proof: EthereumBeaconClient ExecutionHeaders (max_values: None, max_size: Some(136), added: 2611, mode: MaxEncodedLen) - fn submit_execution_header() -> Weight { - // Proof Size summary in bytes: - // Measured: `386` - // Estimated: `3537` - // Minimum execution time: 108_761_000 picoseconds. 
- Weight::from_parts(113_158_000, 0) - .saturating_add(Weight::from_parts(0, 3537)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) - } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_pallet_inbound_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_pallet_inbound_queue.rs index faf404f90cb..153c1d363be 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_pallet_inbound_queue.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_pallet_inbound_queue.rs @@ -58,12 +58,12 @@ impl snowbridge_pallet_inbound_queue::WeightInfo for We /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn submit() -> Weight { // Proof Size summary in bytes: - // Measured: `457` - // Estimated: `3601` - // Minimum execution time: 69_000_000 picoseconds. - Weight::from_parts(70_000_000, 0) - .saturating_add(Weight::from_parts(0, 3601)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `800` + // Estimated: `7200` + // Minimum execution time: 200_000_000 picoseconds. + Weight::from_parts(200_000_000, 0) + .saturating_add(Weight::from_parts(0, 7200)) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(6)) } } diff --git a/prdoc/pr_3761.prdoc b/prdoc/pr_3761.prdoc new file mode 100644 index 00000000000..65b8c396fe3 --- /dev/null +++ b/prdoc/pr_3761.prdoc @@ -0,0 +1,25 @@ +title: "Snowbridge: Synchronize from Snowfork repository" + +doc: + - audience: Runtime Dev + description: | + This PR improves the beacon client to send the execution header along with the message as proof and removes the verification and storing of all execution headers. + If the AH sovereign account is depleted and relayer rewards cannot be paid, the message should still be processed. 
+
+crates:
+- name: snowbridge-pallet-ethereum-client
+  bump: minor
+- name: snowbridge-pallet-inbound-queue
+  bump: minor
+- name: snowbridge-beacon-primitives
+  bump: minor
+- name: snowbridge-core
+  bump: minor
+- name: snowbridge-runtime-test-common
+  bump: minor
+- name: asset-hub-rococo-runtime
+  bump: minor
+- name: bridge-hub-rococo-runtime
+  bump: minor
+- name: bridge-hub-rococo-integration-tests
+  bump: minor
-- 
GitLab

From 0becc45bd826aea6ec128da8525ed73b3657d474 Mon Sep 17 00:00:00 2001
From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com>
Date: Tue, 2 Apr 2024 18:06:01 +0200
Subject: [PATCH 086/128] sp_runtime: TryFrom<&'a RuntimeString> for &str
 (#3942)

Added `TryFrom<&'a RuntimeString> for &'a str`
---
 substrate/primitives/runtime/src/runtime_string.rs | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/substrate/primitives/runtime/src/runtime_string.rs b/substrate/primitives/runtime/src/runtime_string.rs
index aa0bd52e56f..607ae59db63 100644
--- a/substrate/primitives/runtime/src/runtime_string.rs
+++ b/substrate/primitives/runtime/src/runtime_string.rs
@@ -61,6 +61,19 @@ impl From<&'static str> for RuntimeString {
 	}
 }
 
+impl<'a> TryFrom<&'a RuntimeString> for &'a str {
+	type Error = core::str::Utf8Error;
+	fn try_from(from: &'a RuntimeString) -> core::result::Result<&'a str, Self::Error> {
+		match from {
+			#[cfg(feature = "std")]
+			RuntimeString::Owned(string) => Ok(string.as_str()),
+			#[cfg(not(feature = "std"))]
+			RuntimeString::Owned(vec) => core::str::from_utf8(&vec),
+			RuntimeString::Borrowed(str) => Ok(str),
+		}
+	}
+}
+
 #[cfg(feature = "std")]
 impl From<RuntimeString> for String {
 	fn from(string: RuntimeString) -> Self {
-- 
GitLab

From f88190a5201954bd0f5e7802050e5db2b2370159 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Dino=20Pa=C4=8Dandi?= <3002868+Dinonard@users.noreply.github.com>
Date: Tue, 2 Apr 2024 21:08:02 +0200
Subject: [PATCH 087/128] SortedMembers::add for pallet-membership benchmarks
 (#3729)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Adds implementation for `SortedMembers::add` for _pallet-membership_
benchmarks.
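
For context, a minimal sketch of how a downstream benchmark could now seed a member purely through the trait. The `seed_member` helper is hypothetical and not part of this patch; it assumes a runtime whose `SortedMembers` implementation is backed by pallet-membership:

```rust
use frame_support::traits::SortedMembers;

/// Hypothetical benchmark helper: `M` would be the `SortedMembers`
/// implementation provided by pallet-membership.
#[cfg(feature = "runtime-benchmarks")]
fn seed_member<AccountId: Ord, M: SortedMembers<AccountId>>(who: &AccountId) {
	// With this change, `add` routes through `add_member` via the configured
	// `AddOrigin`, so no manual storage writes are needed in benchmark setup.
	M::add(who);
	debug_assert!(M::contains(who));
}
```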
--------- Co-authored-by: Bastian Köcher Co-authored-by: command-bot <> --- substrate/frame/membership/src/lib.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/substrate/frame/membership/src/lib.rs b/substrate/frame/membership/src/lib.rs index 426fc985a52..aa6be6497ee 100644 --- a/substrate/frame/membership/src/lib.rs +++ b/substrate/frame/membership/src/lib.rs @@ -369,6 +369,18 @@ impl, I: 'static> SortedMembers for Pallet { fn count() -> usize { Members::::decode_len().unwrap_or(0) } + + #[cfg(feature = "runtime-benchmarks")] + fn add(new_member: &T::AccountId) { + use frame_support::{assert_ok, traits::EnsureOrigin}; + let new_member_lookup = T::Lookup::unlookup(new_member.clone()); + + if let Ok(origin) = T::AddOrigin::try_successful_origin() { + assert_ok!(Pallet::::add_member(origin, new_member_lookup,)); + } else { + log::error!(target: LOG_TARGET, "Failed to add `{new_member:?}` in `SortedMembers::add`.") + } + } } #[cfg(feature = "runtime-benchmarks")] -- GitLab From 665e3654ceca5a34e8ada66a9805fa7b76fc9ebb Mon Sep 17 00:00:00 2001 From: Andrei Eres Date: Tue, 2 Apr 2024 21:27:11 +0200 Subject: [PATCH 088/128] Remove nextest filtration (#3885) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes https://github.com/paritytech/polkadot-sdk/issues/3884#issuecomment-2026058687 After moving regression tests to benchmarks (https://github.com/paritytech/polkadot-sdk/pull/3741) we don't need to filter tests anymore. --------- Signed-off-by: Alexandru Vasile Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Andrei Sandu <54316454+sandreim@users.noreply.github.com> Co-authored-by: Alin Dima Co-authored-by: Andrei Sandu Co-authored-by: Oliver Tale-Yazdi Co-authored-by: Javier Viola <363911+pepoviola@users.noreply.github.com> Co-authored-by: Serban Iorga Co-authored-by: Adrian Catangiu Co-authored-by: Bastian Köcher Co-authored-by: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Co-authored-by: Niklas Adolfsson Co-authored-by: Dastan <88332432+dastansam@users.noreply.github.com> Co-authored-by: Liam Aharon Co-authored-by: Clara van Staden Co-authored-by: Ron Co-authored-by: Vincent Geddes Co-authored-by: Svyatoslav Nikolsky Co-authored-by: Bastian Köcher --- .gitlab/pipeline/test.yml | 3 +-- substrate/frame/core-fellowship/src/benchmarking.rs | 5 +++++ substrate/frame/core-fellowship/src/lib.rs | 3 ++- substrate/frame/scheduler/src/tests.rs | 5 ++++- 4 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index af261a893da..48c84b472b4 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -25,7 +25,6 @@ test-linux-stable: # "upgrade_version_checks_should_work" is currently failing - | time cargo nextest run \ - --filter-expr 'not deps(/polkadot-subsystem-bench/)' \ --workspace \ --locked \ --release \ @@ -70,7 +69,7 @@ test-linux-stable-runtime-benchmarks: # but still want to have debug assertions. 
RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" script: - - time cargo nextest run --filter-expr 'not deps(/polkadot-subsystem-bench/)' --workspace --features runtime-benchmarks benchmark --locked --cargo-profile testnet + - time cargo nextest run --workspace --features runtime-benchmarks benchmark --locked --cargo-profile testnet # can be used to run all tests # test-linux-stable-all: diff --git a/substrate/frame/core-fellowship/src/benchmarking.rs b/substrate/frame/core-fellowship/src/benchmarking.rs index ddde70bd7ce..fd5453310be 100644 --- a/substrate/frame/core-fellowship/src/benchmarking.rs +++ b/substrate/frame/core-fellowship/src/benchmarking.rs @@ -149,6 +149,11 @@ mod benchmarks { #[benchmark] fn promote() -> Result<(), BenchmarkError> { + // Ensure that the `min_promotion_period` wont get in our way. + let mut params = Params::::get(); + params.min_promotion_period = [Zero::zero(); RANK_COUNT]; + Params::::put(¶ms); + let member = make_member::(1)?; ensure_evidence::(&member)?; diff --git a/substrate/frame/core-fellowship/src/lib.rs b/substrate/frame/core-fellowship/src/lib.rs index d1b81c3ca13..afb188261fd 100644 --- a/substrate/frame/core-fellowship/src/lib.rs +++ b/substrate/frame/core-fellowship/src/lib.rs @@ -149,7 +149,8 @@ pub mod pallet { }; use frame_system::{ensure_root, pallet_prelude::*}; - const RANK_COUNT: usize = 9; + /// Number of available ranks. + pub(crate) const RANK_COUNT: usize = 9; #[pallet::pallet] pub struct Pallet(PhantomData<(T, I)>); diff --git a/substrate/frame/scheduler/src/tests.rs b/substrate/frame/scheduler/src/tests.rs index f251dde99a8..3023a370a4b 100644 --- a/substrate/frame/scheduler/src/tests.rs +++ b/substrate/frame/scheduler/src/tests.rs @@ -1501,8 +1501,11 @@ fn scheduler_handles_periodic_unavailable_preimage() { run_to_block(4); assert_eq!(logger::log().len(), 1); - // Unnote the preimage + // As the public api doesn't support to remove a noted preimage, we need to first unnote it + // and then request it again. Basically this should not happen in real life (whatever you + // call real life;). Preimage::unnote(&hash); + Preimage::request(&hash); // Does not ever execute again. run_to_block(100); -- GitLab From e8e201f0ffc657d227ab869a778e37a876af2666 Mon Sep 17 00:00:00 2001 From: Andrei Sandu <54316454+sandreim@users.noreply.github.com> Date: Wed, 3 Apr 2024 11:34:50 +0300 Subject: [PATCH 089/128] statement-distribution: fix filtering of statements for elastic parachains (#3879) fixes https://github.com/paritytech/polkadot-sdk/issues/3775 Additionally moves the claim queue fetch utilities into `subsystem-util`. 
TODO: - [x] fix tests - [x] add elastic scaling tests --------- Signed-off-by: Andrei Sandu --- .../node/collation-generation/src/error.rs | 2 + polkadot/node/collation-generation/src/lib.rs | 54 ++------ .../node/collation-generation/src/tests.rs | 15 +- .../statement-distribution/src/error.rs | 3 + .../statement-distribution/src/v2/mod.rs | 131 +++++++++++++----- .../src/v2/tests/cluster.rs | 60 ++++++++ .../src/v2/tests/grid.rs | 23 ++- .../src/v2/tests/mod.rs | 29 +++- polkadot/node/subsystem-util/src/vstaging.rs | 46 +++++- 9 files changed, 265 insertions(+), 98 deletions(-) diff --git a/polkadot/node/collation-generation/src/error.rs b/polkadot/node/collation-generation/src/error.rs index 852c50f3068..f04e3c4f20b 100644 --- a/polkadot/node/collation-generation/src/error.rs +++ b/polkadot/node/collation-generation/src/error.rs @@ -27,6 +27,8 @@ pub enum Error { #[error(transparent)] Util(#[from] polkadot_node_subsystem_util::Error), #[error(transparent)] + UtilRuntime(#[from] polkadot_node_subsystem_util::runtime::Error), + #[error(transparent)] Erasure(#[from] polkadot_erasure_coding::Error), #[error("Parachain backing state not available in runtime.")] MissingParaBackingState, diff --git a/polkadot/node/collation-generation/src/lib.rs b/polkadot/node/collation-generation/src/lib.rs index 3164f6078bc..fb82871bb15 100644 --- a/polkadot/node/collation-generation/src/lib.rs +++ b/polkadot/node/collation-generation/src/lib.rs @@ -38,25 +38,23 @@ use polkadot_node_primitives::{ SubmitCollationParams, }; use polkadot_node_subsystem::{ - messages::{CollationGenerationMessage, CollatorProtocolMessage, RuntimeApiRequest}, + messages::{CollationGenerationMessage, CollatorProtocolMessage}, overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, RuntimeApiError, SpawnedSubsystem, SubsystemContext, SubsystemError, SubsystemResult, }; use polkadot_node_subsystem_util::{ - has_required_runtime, request_async_backing_params, request_availability_cores, - request_claim_queue, request_para_backing_state, request_persisted_validation_data, - request_validation_code, request_validation_code_hash, request_validators, + request_async_backing_params, request_availability_cores, request_para_backing_state, + request_persisted_validation_data, request_validation_code, request_validation_code_hash, + request_validators, + vstaging::{fetch_claim_queue, fetch_next_scheduled_on_core}, }; use polkadot_primitives::{ collator_signature_payload, CandidateCommitments, CandidateDescriptor, CandidateReceipt, CollatorPair, CoreIndex, CoreState, Hash, Id as ParaId, OccupiedCoreAssumption, - PersistedValidationData, ScheduledCore, ValidationCodeHash, + PersistedValidationData, ValidationCodeHash, }; use sp_core::crypto::Pair; -use std::{ - collections::{BTreeMap, VecDeque}, - sync::Arc, -}; +use std::sync::Arc; mod error; @@ -228,7 +226,9 @@ async fn handle_new_activations( let availability_cores = availability_cores??; let async_backing_params = async_backing_params?.ok(); let n_validators = validators??.len(); - let maybe_claim_queue = fetch_claim_queue(ctx.sender(), relay_parent).await?; + let maybe_claim_queue = fetch_claim_queue(ctx.sender(), relay_parent) + .await + .map_err(crate::error::Error::UtilRuntime)?; // The loop bellow will fill in cores that the para is allowed to build on. 
let mut cores_to_build_on = Vec::new(); @@ -655,37 +655,3 @@ fn erasure_root( let chunks = polkadot_erasure_coding::obtain_chunks_v1(n_validators, &available_data)?; Ok(polkadot_erasure_coding::branches(&chunks).root()) } - -// Checks if the runtime supports `request_claim_queue` and executes it. Returns `Ok(None)` -// otherwise. Any [`RuntimeApiError`]s are bubbled up to the caller. -async fn fetch_claim_queue( - sender: &mut impl overseer::CollationGenerationSenderTrait, - relay_parent: Hash, -) -> crate::error::Result>>> { - if has_required_runtime( - sender, - relay_parent, - RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT, - ) - .await - { - let res = request_claim_queue(relay_parent, sender).await.await??; - Ok(Some(res)) - } else { - gum::trace!(target: LOG_TARGET, "Runtime doesn't support `request_claim_queue`"); - Ok(None) - } -} - -// Returns the next scheduled `ParaId` for a core in the claim queue, wrapped in `ScheduledCore`. -// This function is supposed to be used in `handle_new_activations` hence the return type. -fn fetch_next_scheduled_on_core( - claim_queue: &BTreeMap>, - core_idx: CoreIndex, -) -> Option { - claim_queue - .get(&core_idx)? - .front() - .cloned() - .map(|para_id| ScheduledCore { para_id, collator: None }) -} diff --git a/polkadot/node/collation-generation/src/tests.rs b/polkadot/node/collation-generation/src/tests.rs index 923a21e86fb..781d27188df 100644 --- a/polkadot/node/collation-generation/src/tests.rs +++ b/polkadot/node/collation-generation/src/tests.rs @@ -28,7 +28,7 @@ use polkadot_node_subsystem::{ ActivatedLeaf, }; use polkadot_node_subsystem_test_helpers::{subsystem_test_harness, TestSubsystemContextHandle}; -use polkadot_node_subsystem_util::TimeoutExt; +use polkadot_node_subsystem_util::{vstaging::ClaimQueueSnapshot, TimeoutExt}; use polkadot_primitives::{ async_backing::{BackingState, CandidatePendingAvailability}, AsyncBackingParams, BlockNumber, CollatorPair, HeadData, PersistedValidationData, @@ -36,7 +36,10 @@ use polkadot_primitives::{ }; use rstest::rstest; use sp_keyring::sr25519::Keyring as Sr25519Keyring; -use std::pin::Pin; +use std::{ + collections::{BTreeMap, VecDeque}, + pin::Pin, +}; use test_helpers::{ dummy_candidate_descriptor, dummy_hash, dummy_head_data, dummy_validator, make_candidate, }; @@ -617,7 +620,7 @@ fn fallback_when_no_validation_code_hash_api(#[case] runtime_version: u32) { _hash, RuntimeApiRequest::ClaimQueue(tx), ))) if runtime_version >= RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT => { - let res = BTreeMap::>::new(); + let res = ClaimQueueSnapshot::new(); tx.send(Ok(res)).unwrap(); }, Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( @@ -780,7 +783,7 @@ fn distribute_collation_for_occupied_core_with_async_backing_enabled(#[case] run candidate_hash: Default::default(), candidate_descriptor: dummy_candidate_descriptor(dummy_hash()), })]; - let claim_queue = BTreeMap::from([(CoreIndex::from(0), VecDeque::from([para_id]))]); + let claim_queue = ClaimQueueSnapshot::from([(CoreIndex::from(0), VecDeque::from([para_id]))]); test_harness(|mut virtual_overseer| async move { helpers::initialize_collator(&mut virtual_overseer, para_id).await; @@ -962,7 +965,7 @@ fn no_collation_is_distributed_for_occupied_core_with_async_backing_disabled( candidate_hash: Default::default(), candidate_descriptor: dummy_candidate_descriptor(dummy_hash()), })]; - let claim_queue = BTreeMap::from([(CoreIndex::from(0), VecDeque::from([para_id]))]); + let claim_queue = ClaimQueueSnapshot::from([(CoreIndex::from(0), 
VecDeque::from([para_id]))]); test_harness(|mut virtual_overseer| async move { helpers::initialize_collator(&mut virtual_overseer, para_id).await; @@ -1050,7 +1053,7 @@ mod helpers { async_backing_params: AsyncBackingParams, cores: Vec, runtime_version: u32, - claim_queue: BTreeMap>, + claim_queue: ClaimQueueSnapshot, ) { assert_matches!( overseer_recv(virtual_overseer).await, diff --git a/polkadot/node/network/statement-distribution/src/error.rs b/polkadot/node/network/statement-distribution/src/error.rs index a712ab6da43..d7f52162fe2 100644 --- a/polkadot/node/network/statement-distribution/src/error.rs +++ b/polkadot/node/network/statement-distribution/src/error.rs @@ -81,6 +81,9 @@ pub enum Error { #[error("Fetching validator groups failed {0:?}")] FetchValidatorGroups(RuntimeApiError), + #[error("Fetching claim queue failed {0:?}")] + FetchClaimQueue(runtime::Error), + #[error("Attempted to share statement when not a validator or not assigned")] InvalidShare, diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs index d782e37f10b..b9f6f705ed8 100644 --- a/polkadot/node/network/statement-distribution/src/v2/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -46,6 +46,7 @@ use polkadot_node_subsystem_util::{ backing_implicit_view::View as ImplicitView, reputation::ReputationAggregator, runtime::{request_min_backing_votes, ProspectiveParachainsMode}, + vstaging::fetch_claim_queue, }; use polkadot_primitives::{ AuthorityDiscoveryId, CandidateHash, CompactStatement, CoreIndex, CoreState, GroupIndex, @@ -149,10 +150,9 @@ pub(crate) const REQUEST_RETRY_DELAY: Duration = Duration::from_secs(1); struct PerRelayParentState { local_validator: Option, statement_store: StatementStore, - availability_cores: Vec, - group_rotation_info: GroupRotationInfo, seconding_limit: usize, session: SessionIndex, + groups_per_para: HashMap>, } impl PerRelayParentState { @@ -563,11 +563,13 @@ pub(crate) async fn handle_active_leaves_update( activated: &ActivatedLeaf, leaf_mode: ProspectiveParachainsMode, ) -> JfyiErrorResult<()> { - let seconding_limit = match leaf_mode { + let max_candidate_depth = match leaf_mode { ProspectiveParachainsMode::Disabled => return Ok(()), - ProspectiveParachainsMode::Enabled { max_candidate_depth, .. } => max_candidate_depth + 1, + ProspectiveParachainsMode::Enabled { max_candidate_depth, .. } => max_candidate_depth, }; + let seconding_limit = max_candidate_depth + 1; + state .implicit_view .activate_leaf(ctx.sender(), activated.hash) @@ -693,15 +695,23 @@ pub(crate) async fn handle_active_leaves_update( } }); + let groups_per_para = determine_groups_per_para( + ctx.sender(), + new_relay_parent, + availability_cores, + group_rotation_info, + max_candidate_depth, + ) + .await; + state.per_relay_parent.insert( new_relay_parent, PerRelayParentState { local_validator, statement_store: StatementStore::new(&per_session.groups), - availability_cores, - group_rotation_info, seconding_limit, session: session_index, + groups_per_para, }, ); } @@ -2126,17 +2136,64 @@ async fn provide_candidate_to_grid( } } -fn group_for_para( - availability_cores: &[CoreState], - group_rotation_info: &GroupRotationInfo, - para_id: ParaId, -) -> Option { - // Note: this won't work well for on-demand parachains as it assumes that core assignments are - // fixed across blocks. 
- let core_index = availability_cores.iter().position(|c| c.para_id() == Some(para_id)); +// Utility function to populate per relay parent `ParaId` to `GroupIndex` mappings. +async fn determine_groups_per_para( + sender: &mut impl overseer::StatementDistributionSenderTrait, + relay_parent: Hash, + availability_cores: Vec, + group_rotation_info: GroupRotationInfo, + max_candidate_depth: usize, +) -> HashMap> { + let maybe_claim_queue = fetch_claim_queue(sender, relay_parent) + .await + .unwrap_or_else(|err| { + gum::debug!( + target: LOG_TARGET, + ?relay_parent, + ?err, + "determine_groups_per_para: `claim_queue` API not available, falling back to iterating availability cores" + ); + None + }); + + let n_cores = availability_cores.len(); + + // Determine the core indices occupied by each para at the current relay parent. To support + // on-demand parachains we also consider the core indices at next block if core has a candidate + // pending availability. + let para_core_indices: Vec<_> = if let Some(claim_queue) = maybe_claim_queue { + claim_queue + .into_iter() + .filter_map(|(core_index, paras)| Some((*paras.front()?, core_index))) + .collect() + } else { + availability_cores + .into_iter() + .enumerate() + .filter_map(|(index, core)| match core { + CoreState::Scheduled(scheduled_core) => + Some((scheduled_core.para_id, CoreIndex(index as u32))), + CoreState::Occupied(occupied_core) => + if max_candidate_depth >= 1 { + occupied_core + .next_up_on_available + .map(|scheduled_core| (scheduled_core.para_id, CoreIndex(index as u32))) + } else { + None + }, + CoreState::Free => None, + }) + .collect() + }; - core_index - .map(|c| group_rotation_info.group_for_core(CoreIndex(c as _), availability_cores.len())) + let mut groups_per_para = HashMap::new(); + // Map from `CoreIndex` to `GroupIndex` and collect as `HashMap`. + for (para, core_index) in para_core_indices { + let group_index = group_rotation_info.group_for_core(core_index, n_cores); + groups_per_para.entry(para).or_insert_with(Vec::new).push(group_index) + } + + groups_per_para } #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] @@ -2192,18 +2249,23 @@ async fn fragment_tree_update_inner( let confirmed_candidate = state.candidates.get_confirmed(&candidate_hash); let prs = state.per_relay_parent.get_mut(&receipt.descriptor().relay_parent); if let (Some(confirmed), Some(prs)) = (confirmed_candidate, prs) { - let group_index = group_for_para( - &prs.availability_cores, - &prs.group_rotation_info, - receipt.descriptor().para_id, - ); - let per_session = state.per_session.get(&prs.session); - if let (Some(per_session), Some(group_index)) = (per_session, group_index) { + let group_index = confirmed.group_index(); + + // Sanity check if group_index is valid for this para at relay parent. 
+ let Some(expected_groups) = prs.groups_per_para.get(&receipt.descriptor().para_id) + else { + continue + }; + if !expected_groups.iter().any(|g| *g == group_index) { + continue + } + + if let Some(per_session) = per_session { send_backing_fresh_statements( ctx, candidate_hash, - group_index, + confirmed.group_index(), &receipt.descriptor().relay_parent, prs, confirmed, @@ -2311,13 +2373,12 @@ async fn handle_incoming_manifest_common<'a, Context>( Some(x) => x, }; - let expected_group = group_for_para( - &relay_parent_state.availability_cores, - &relay_parent_state.group_rotation_info, - para_id, - ); + let Some(expected_groups) = relay_parent_state.groups_per_para.get(&para_id) else { + modify_reputation(reputation, ctx.sender(), peer, COST_MALFORMED_MANIFEST).await; + return None + }; - if expected_group != Some(manifest_summary.claimed_group_index) { + if !expected_groups.iter().any(|g| g == &manifest_summary.claimed_group_index) { modify_reputation(reputation, ctx.sender(), peer, COST_MALFORMED_MANIFEST).await; return None } @@ -3037,13 +3098,11 @@ pub(crate) async fn handle_response( relay_parent_state.session, |v| per_session.session_info.validators.get(v).map(|x| x.clone()), |para, g_index| { - let expected_group = group_for_para( - &relay_parent_state.availability_cores, - &relay_parent_state.group_rotation_info, - para, - ); + let Some(expected_groups) = relay_parent_state.groups_per_para.get(&para) else { + return false + }; - Some(g_index) == expected_group + expected_groups.iter().any(|g| g == &g_index) }, disabled_mask, ); diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs b/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs index a944a9cd6d0..4fb033e08ce 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs @@ -312,6 +312,66 @@ fn useful_cluster_statement_from_non_cluster_peer_rejected() { }); } +// Both validators in the test are part of backing groups assigned to the same parachain +#[test] +fn elastic_scaling_useful_cluster_statement_from_non_cluster_peer_rejected() { + let config = TestConfig { + validator_count: 20, + group_size: 3, + local_validator: LocalRole::Validator, + async_backing_params: None, + }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + + let test_leaf = state.make_dummy_leaf_with_multiple_cores_per_para(relay_parent, 3); + + // Peer A is not in our group, but its group is assigned to the same para as ours.
+ let not_our_group = GroupIndex(1); + + let that_group_validators = state.group_validators(not_our_group, false); + let v_non = that_group_validators[0]; + + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(v_non)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + activate_leaf(&mut overseer, &test_leaf, &state, true, vec![]).await; + + let statement = state + .sign_statement( + v_non, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) + if p == peer_a && r == COST_UNEXPECTED_STATEMENT_INVALID_SENDER.into() => { } + ); + + overseer + }); +} + #[test] fn statement_from_non_cluster_originator_unexpected() { let config = TestConfig { diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs index 38a12cf32e3..9d00a92e742 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs @@ -1829,9 +1829,7 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() { }); } -// Grid statements imported to backing once candidate enters hypothetical frontier. -#[test] -fn grid_statements_imported_to_backing() { +fn inner_grid_statements_imported_to_backing(groups_for_first_para: usize) { let validator_count = 6; let group_size = 3; let config = TestConfig { @@ -1851,9 +1849,12 @@ fn grid_statements_imported_to_backing() { let local_group_index = local_validator.group_index.unwrap(); let other_group = next_group_index(local_group_index, validator_count, group_size); - let other_para = ParaId::from(other_group.0); - let test_leaf = state.make_dummy_leaf(relay_parent); + // Other para is same para for elastic scaling test (groups_for_first_para > 1) + let other_para = ParaId::from((groups_for_first_para == 1) as u32); + + let test_leaf = + state.make_dummy_leaf_with_multiple_cores_per_para(relay_parent, groups_for_first_para); let (candidate, pvd) = make_candidate( relay_parent, @@ -2018,6 +2019,18 @@ fn grid_statements_imported_to_backing() { overseer }); } +// Grid statements imported to backing once candidate enters hypothetical frontier. +#[test] +fn grid_statements_imported_to_backing() { + inner_grid_statements_imported_to_backing(1); +} + +// Grid statements imported to backing once candidate enters hypothetical frontier. +// All statements are for candidates of the same parachain but from different backing groups. 
+#[test] +fn elastic_scaling_grid_statements_imported_to_backing() { + inner_grid_statements_imported_to_backing(2); +} #[test] fn advertisements_rejected_from_incorrect_peers() { diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs index 82986a0330e..e98b1107931 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs @@ -177,20 +177,39 @@ impl TestState { } fn make_dummy_leaf(&self, relay_parent: Hash) -> TestLeaf { + self.make_dummy_leaf_with_multiple_cores_per_para(relay_parent, 1) + } + + fn make_dummy_leaf_with_multiple_cores_per_para( + &self, + relay_parent: Hash, + groups_for_first_para: usize, + ) -> TestLeaf { TestLeaf { number: 1, hash: relay_parent, parent_hash: Hash::repeat_byte(0), session: 1, availability_cores: self.make_availability_cores(|i| { - CoreState::Scheduled(ScheduledCore { - para_id: ParaId::from(i as u32), - collator: None, - }) + let para_id = if i < groups_for_first_para { + ParaId::from(0u32) + } else { + ParaId::from(i as u32) + }; + + CoreState::Scheduled(ScheduledCore { para_id, collator: None }) }), disabled_validators: Default::default(), para_data: (0..self.session_info.validator_groups.len()) - .map(|i| (ParaId::from(i as u32), PerParaData::new(1, vec![1, 2, 3].into()))) + .map(|i| { + let para_id = if i < groups_for_first_para { + ParaId::from(0u32) + } else { + ParaId::from(i as u32) + }; + + (para_id, PerParaData::new(1, vec![1, 2, 3].into())) + }) .collect(), minimum_backing_votes: 2, } diff --git a/polkadot/node/subsystem-util/src/vstaging.rs b/polkadot/node/subsystem-util/src/vstaging.rs index 3e807eff538..25ea7ce7c9b 100644 --- a/polkadot/node/subsystem-util/src/vstaging.rs +++ b/polkadot/node/subsystem-util/src/vstaging.rs @@ -19,14 +19,19 @@ //! This module is intended to contain common boiler plate code handling unreleased runtime API //! calls. +use std::collections::{BTreeMap, VecDeque}; + use polkadot_node_subsystem_types::messages::{RuntimeApiMessage, RuntimeApiRequest}; use polkadot_overseer::SubsystemSender; -use polkadot_primitives::{Hash, ValidatorIndex}; +use polkadot_primitives::{CoreIndex, Hash, Id as ParaId, ScheduledCore, ValidatorIndex}; -use crate::{has_required_runtime, request_disabled_validators, runtime}; +use crate::{has_required_runtime, request_claim_queue, request_disabled_validators, runtime}; const LOG_TARGET: &'static str = "parachain::subsystem-util-vstaging"; +/// A snapshot of the runtime claim queue at an arbitrary relay chain block. +pub type ClaimQueueSnapshot = BTreeMap<CoreIndex, VecDeque<ParaId>>; + // TODO: https://github.com/paritytech/polkadot-sdk/issues/1940 /// Returns disabled validators list if the runtime supports it. Otherwise logs a debug message and /// returns an empty vec. @@ -54,3 +59,40 @@ pub async fn get_disabled_validators_with_fallback<Sender: SubsystemSender<RuntimeApiMessage>>( + +/// Checks if the runtime supports `request_claim_queue` and executes it. Returns `Ok(None)` +/// otherwise. Any [`RuntimeApiError`]s are bubbled up to the caller. +pub async fn fetch_claim_queue( + sender: &mut impl SubsystemSender<RuntimeApiMessage>, + relay_parent: Hash, +) -> Result<Option<ClaimQueueSnapshot>, runtime::Error> { + if has_required_runtime( + sender, + relay_parent, + RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT, + ) + .await + { + let res = request_claim_queue(relay_parent, sender) + .await + .await + .map_err(runtime::Error::RuntimeRequestCanceled)??; + Ok(Some(res)) + } else { + gum::trace!(target: LOG_TARGET, "Runtime doesn't support `request_claim_queue`"); + Ok(None) + } +} + +/// Returns the next scheduled `ParaId` for a core in the claim queue, wrapped in `ScheduledCore`.
+pub fn fetch_next_scheduled_on_core( + claim_queue: &ClaimQueueSnapshot, + core_idx: CoreIndex, +) -> Option { + claim_queue + .get(&core_idx)? + .front() + .cloned() + .map(|para_id| ScheduledCore { para_id, collator: None }) +} -- GitLab From 9b378a2ffef1d5846872adc4336341805bffbc30 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 3 Apr 2024 10:35:53 +0200 Subject: [PATCH 090/128] sp-wasm-interface: `wasmtime` should not be enabled by `std` (#3954) Closes: https://github.com/paritytech/polkadot-sdk/issues/3909 --- substrate/primitives/wasm-interface/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substrate/primitives/wasm-interface/Cargo.toml b/substrate/primitives/wasm-interface/Cargo.toml index c05cc05ff06..15a20fab5e5 100644 --- a/substrate/primitives/wasm-interface/Cargo.toml +++ b/substrate/primitives/wasm-interface/Cargo.toml @@ -25,5 +25,5 @@ anyhow = { version = "1.0.81", optional = true } [features] default = ["std"] -std = ["codec/std", "log/std", "wasmtime"] +std = ["codec/std", "log/std"] wasmtime = ["anyhow", "dep:wasmtime"] -- GitLab From cdacfb9d33710093d67e96cd6767dba0243ff450 Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Wed, 3 Apr 2024 13:10:50 +0200 Subject: [PATCH 091/128] Added tests for XCM barriers: `AllowSubscriptions`, `WithUniqueTopic` and `TrailingSetTopicAsId` (#3955) Closes: https://github.com/paritytech/polkadot-sdk/issues/1756 --- .../xcm/xcm-builder/src/tests/barriers.rs | 59 ++++++++++++ polkadot/xcm/xcm-builder/src/tests/mod.rs | 1 + polkadot/xcm/xcm-builder/src/tests/routing.rs | 95 +++++++++++++++++++ .../src/tests/version_subscriptions.rs | 32 +++++-- .../xcm-executor/src/traits/should_execute.rs | 4 +- 5 files changed, 181 insertions(+), 10 deletions(-) create mode 100644 polkadot/xcm/xcm-builder/src/tests/routing.rs diff --git a/polkadot/xcm/xcm-builder/src/tests/barriers.rs b/polkadot/xcm/xcm-builder/src/tests/barriers.rs index 99a9dd5a660..6516263f57a 100644 --- a/polkadot/xcm/xcm-builder/src/tests/barriers.rs +++ b/polkadot/xcm/xcm-builder/src/tests/barriers.rs @@ -309,3 +309,62 @@ fn suspension_should_work() { ); assert_eq!(r, Ok(())); } + +#[test] +fn allow_subscriptions_from_should_work() { + // allow only parent + AllowSubsFrom::set(vec![Location::parent()]); + + let valid_xcm_1 = Xcm::(vec![SubscribeVersion { + query_id: 42, + max_response_weight: Weight::from_parts(5000, 5000), + }]); + let valid_xcm_2 = Xcm::(vec![UnsubscribeVersion]); + let invalid_xcm_1 = Xcm::(vec![ + SetAppendix(Xcm(vec![])), + SubscribeVersion { query_id: 42, max_response_weight: Weight::from_parts(5000, 5000) }, + ]); + let invalid_xcm_2 = Xcm::(vec![ + SubscribeVersion { query_id: 42, max_response_weight: Weight::from_parts(5000, 5000) }, + SetTopic([0; 32]), + ]); + + let test_data = vec![ + ( + valid_xcm_1.clone(), + Parachain(1).into_location(), + // not allowed origin + Err(ProcessMessageError::Unsupported), + ), + (valid_xcm_1, Location::parent(), Ok(())), + ( + valid_xcm_2.clone(), + Parachain(1).into_location(), + // not allowed origin + Err(ProcessMessageError::Unsupported), + ), + (valid_xcm_2, Location::parent(), Ok(())), + ( + invalid_xcm_1, + Location::parent(), + // invalid XCM + Err(ProcessMessageError::BadFormat), + ), + ( + invalid_xcm_2, + Location::parent(), + // invalid XCM + Err(ProcessMessageError::BadFormat), + ), + ]; + + for (mut message, origin, expected_result) in test_data { + let r = AllowSubscriptionsFrom::>::should_execute( + &origin, + message.inner_mut(), + 
Weight::from_parts(10, 10), + &mut props(Weight::zero()), + ); + assert_eq!(r, expected_result, "Failed for origin: {origin:?} and message: {message:?}"); + } +} diff --git a/polkadot/xcm/xcm-builder/src/tests/mod.rs b/polkadot/xcm/xcm-builder/src/tests/mod.rs index e11caf6282b..63d254a1067 100644 --- a/polkadot/xcm/xcm-builder/src/tests/mod.rs +++ b/polkadot/xcm/xcm-builder/src/tests/mod.rs @@ -36,6 +36,7 @@ mod locking; mod origins; mod pay; mod querying; +mod routing; mod transacting; mod version_subscriptions; mod weight; diff --git a/polkadot/xcm/xcm-builder/src/tests/routing.rs b/polkadot/xcm/xcm-builder/src/tests/routing.rs new file mode 100644 index 00000000000..28117d647a0 --- /dev/null +++ b/polkadot/xcm/xcm-builder/src/tests/routing.rs @@ -0,0 +1,95 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use super::*; +use frame_support::{assert_ok, traits::Everything}; +use xcm_executor::traits::Properties; + +fn props() -> Properties { + Properties { weight_credit: Weight::zero(), message_id: None } +} + +#[test] +fn trailing_set_topic_as_id_with_unique_topic_should_work() { + type AllowSubscriptions = AllowSubscriptionsFrom; + + // check the validity of XCM for the `AllowSubscriptions` barrier + let valid_xcm = Xcm::<()>(vec![SubscribeVersion { + query_id: 42, + max_response_weight: Weight::from_parts(5000, 5000), + }]); + assert_eq!( + AllowSubscriptions::should_execute( + &Location::parent(), + valid_xcm.clone().inner_mut(), + Weight::from_parts(10, 10), + &mut props(), + ), + Ok(()) + ); + + // simulate sending `valid_xcm` with the `WithUniqueTopic` router + let mut sent_xcm = sp_io::TestExternalities::default().execute_with(|| { + assert_ok!(send_xcm::>(Location::parent(), valid_xcm,)); + sent_xcm() + }); + assert_eq!(1, sent_xcm.len()); + + // `sent_xcm` should contain `SubscribeVersion` and have `SetTopic` added + let mut sent_xcm = sent_xcm.remove(0).1; + let _ = sent_xcm + .0 + .matcher() + .assert_remaining_insts(2) + .expect("two instructions") + .match_next_inst(|instr| match instr { + SubscribeVersion { .. } => Ok(()), + _ => Err(ProcessMessageError::BadFormat), + }) + .expect("expected instruction `SubscribeVersion`") + .match_next_inst(|instr| match instr { + SetTopic(..) 
=> Ok(()), + _ => Err(ProcessMessageError::BadFormat), + }) + .expect("expected instruction `SetTopic`"); + + // `sent_xcm` contains `SetTopic` and is now invalid for `AllowSubscriptions` + assert_eq!( + AllowSubscriptions::should_execute( + &Location::parent(), + sent_xcm.clone().inner_mut(), + Weight::from_parts(10, 10), + &mut props(), + ), + Err(ProcessMessageError::BadFormat) + ); + + // let's apply `TrailingSetTopicAsId` before `AllowSubscriptions` + let mut props = props(); + assert!(props.message_id.is_none()); + + // should pass, and the `message_id` is set + assert_eq!( + TrailingSetTopicAsId::<AllowSubscriptions>::should_execute( + &Location::parent(), + sent_xcm.clone().inner_mut(), + Weight::from_parts(10, 10), + &mut props, + ), + Ok(()) + ); + assert!(props.message_id.is_some()); +} diff --git a/polkadot/xcm/xcm-builder/src/tests/version_subscriptions.rs b/polkadot/xcm/xcm-builder/src/tests/version_subscriptions.rs index e29e3a54661..01047fde989 100644 --- a/polkadot/xcm/xcm-builder/src/tests/version_subscriptions.rs +++ b/polkadot/xcm/xcm-builder/src/tests/version_subscriptions.rs @@ -27,16 +27,32 @@ fn simple_version_subscriptions_should_work() { ]); let mut hash = fake_message_hash(&message); let weight_limit = Weight::from_parts(20, 20); - let r = XcmExecutor::<TestConfig>::prepare_and_execute( - origin, - message, - &mut hash, - weight_limit, - Weight::zero(), + + // this case fails because the origin is not allowed + assert_eq!( + XcmExecutor::<TestConfig>::prepare_and_execute( + origin, + message.clone(), + &mut hash, + weight_limit, + Weight::zero(), + ), + Outcome::Error { error: XcmError::Barrier } + ); + + // this case fails because the additional `SetAppendix` instruction is not allowed in the + // `AllowSubscriptionsFrom` + assert_eq!( + XcmExecutor::<TestConfig>::prepare_and_execute( + Parent, + message, + &mut hash, + weight_limit, + Weight::zero(), + ), + Outcome::Error { error: XcmError::Barrier } ); - assert_eq!(r, Outcome::Error { error: XcmError::Barrier }); - let origin = Parachain(1000); let message = Xcm::<TestCall>(vec![SubscribeVersion { query_id: 42, max_response_weight: Weight::from_parts(5000, 5000), diff --git a/polkadot/xcm/xcm-executor/src/traits/should_execute.rs b/polkadot/xcm/xcm-executor/src/traits/should_execute.rs index 12e8fd6b87f..e76d56bfe61 100644 --- a/polkadot/xcm/xcm-executor/src/traits/should_execute.rs +++ b/polkadot/xcm/xcm-executor/src/traits/should_execute.rs @@ -33,9 +33,9 @@ pub struct Properties { /// Trait to determine whether the execution engine should actually execute a given XCM. /// /// Can be amalgamated into a tuple to have multiple trials. If any of the tuple elements returns -/// `Ok()`, the execution stops. Else, `Err(_)` is returned if all elements reject the message. +/// `Ok(())`, the execution stops. Else, `Err(_)` is returned if all elements reject the message. pub trait ShouldExecute { - /// Returns `true` if the given `message` may be executed. + /// Returns `Ok(())` if the given `message` may be executed. /// /// - `origin`: The origin (sender) of the message. /// - `instructions`: The message itself. -- GitLab From 287b116c3e50ff8be275b093674404b2f370c553 Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Wed, 3 Apr 2024 14:46:08 +0300 Subject: [PATCH 092/128] chainHead: Ensure reasonable distance between leaf and finalized block (#3562) This PR ensures that the distance between any leaf and the finalized block stays within a reasonable range.
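As a rough illustration of the new guard, here is a hedged sketch, not the PR's actual code: the real check lives on `ChainHeadFollower` and works with the generic `NumberFor<Block>` type, while plain `u64` block numbers are assumed here.

```rust
/// Minimal sketch of the lagging-distance check this PR introduces.
fn distance_within_reason(
    leaf_number: u64,
    finalized_number: u64,
    max_lagging_distance: u64,
) -> Result<(), &'static str> {
    // Saturate instead of underflowing in case the leaf is somehow behind
    // the finalized block.
    let distance = leaf_number.saturating_sub(finalized_number);
    if distance > max_lagging_distance {
        // The real implementation stops all subscriptions and unpins blocks.
        return Err("block distance too large")
    }
    Ok(())
}

fn main() {
    // The default lagging distance in this PR is 128 blocks.
    assert!(distance_within_reason(100, 0, 128).is_ok());
    assert!(distance_within_reason(1_000_000, 0, 128).is_err());
}
```

The rest of this message explains why the guard is needed and how it is configured.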
For a new subscription, the chainHead has to provide all blocks between the leaves of the chain and the finalized block. When the distance between a leaf and the finalized block is large: - The tree route is costly to compute - We could deliver an unbounded number of blocks (potentially millions) (For more details see https://github.com/paritytech/polkadot-sdk/pull/3445#discussion_r1507210283) The configuration of the ChainHead is extended with: - suspend on lagging distance: When the distance between any leaf and the finalized block is greater than this number, the subscriptions are suspended for a given duration. - All active subscriptions are terminated with the `Stop` event, all blocks are unpinned and data discarded. - For incoming subscriptions, until the suspended period expires the subscriptions will immediately receive the `Stop` event. - Defaults to 128 blocks - suspended duration: The amount of time for which subscriptions are suspended - Defaults to 30 seconds cc @paritytech/subxt-team --------- Signed-off-by: Alexandru Vasile Co-authored-by: Sebastian Kunert --- Cargo.lock | 9 +- .../rpc-spec-v2/src/chain_head/chain_head.rs | 25 +- .../src/chain_head/chain_head_follow.rs | 71 +++++- .../src/chain_head/subscription/error.rs | 4 + .../src/chain_head/subscription/inner.rs | 238 +++++++----------- .../src/chain_head/subscription/mod.rs | 9 + .../rpc-spec-v2/src/chain_head/tests.rs | 121 +++++++++ 7 files changed, 313 insertions(+), 164 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9393f8d606d..55ef63fa2bf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2624,9 +2624,9 @@ dependencies = [ [[package]] name = "clap-num" -version = "1.1.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e063d263364859dc54fb064cedb7c122740cd4733644b14b176c097f51e8ab7" +checksum = "488557e97528174edaa2ee268b23a809e0c598213a4bbcb4f34575a46fda147e" dependencies = [ "num-traits", ] @@ -2844,10 +2844,11 @@ checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "colored" -version = "2.1.0" +version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbf2150cce219b664a8a70df7a1f933836724b503f8a413af9365b4dcc4d90b8" +checksum = "2674ec482fbc38012cf31e6c42ba0177b431a0cb6f15fe40efa5aab1bda516f6" dependencies = [ + "is-terminal", "lazy_static", "windows-sys 0.48.0", ] diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs index 975abbca4b6..86d9a726d7b 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs @@ -62,6 +62,9 @@ pub struct ChainHeadConfig { pub subscription_max_pinned_duration: Duration, /// The maximum number of ongoing operations per subscription. pub subscription_max_ongoing_operations: usize, + /// Stop all subscriptions if the distance between the leaves and the current finalized + /// block is larger than this value. + pub max_lagging_distance: usize, /// The maximum number of items reported by the `chainHead_storage` before /// pagination is required. pub operation_max_storage_items: usize, @@ -88,6 +91,10 @@ const MAX_ONGOING_OPERATIONS: usize = 16; /// before paginations is required. const MAX_STORAGE_ITER_ITEMS: usize = 5; +/// Stop all subscriptions if the distance between the leaves and the current finalized +/// block is larger than this value. 
+const MAX_LAGGING_DISTANCE: usize = 128; + /// The maximum number of `chainHead_follow` subscriptions per connection. const MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION: usize = 4; @@ -97,6 +104,7 @@ impl Default for ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: MAX_PINNED_DURATION, subscription_max_ongoing_operations: MAX_ONGOING_OPERATIONS, + max_lagging_distance: MAX_LAGGING_DISTANCE, operation_max_storage_items: MAX_STORAGE_ITER_ITEMS, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, } @@ -116,6 +124,9 @@ pub struct ChainHead, Block: BlockT, Client> { /// The maximum number of items reported by the `chainHead_storage` before /// pagination is required. operation_max_storage_items: usize, + /// Stop all subscriptions if the distance between the leaves and the current finalized + /// block is larger than this value. + max_lagging_distance: usize, /// Phantom member to pin the block type. _phantom: PhantomData, } @@ -140,6 +151,7 @@ impl, Block: BlockT, Client> ChainHead { backend, ), operation_max_storage_items: config.operation_max_storage_items, + max_lagging_distance: config.max_lagging_distance, _phantom: PhantomData, } } @@ -187,6 +199,7 @@ where let subscriptions = self.subscriptions.clone(); let backend = self.backend.clone(); let client = self.client.clone(); + let max_lagging_distance = self.max_lagging_distance; let fut = async move { // Ensure the current connection ID has enough space to accept a new subscription. @@ -207,8 +220,8 @@ where let Some(sub_data) = reserved_subscription.insert_subscription(sub_id.clone(), with_runtime) else { - // Inserting the subscription can only fail if the JsonRPSee - // generated a duplicate subscription ID. + // Inserting the subscription can only fail if the JsonRPSee generated a duplicate + // subscription ID. debug!(target: LOG_TARGET, "[follow][id={:?}] Subscription already accepted", sub_id); let msg = to_sub_message(&sink, &FollowEvent::::Stop); let _ = sink.send(msg).await; @@ -222,9 +235,13 @@ where subscriptions, with_runtime, sub_id.clone(), + max_lagging_distance, ); - - chain_head_follow.generate_events(sink, sub_data).await; + let result = chain_head_follow.generate_events(sink, sub_data).await; + if let Err(SubscriptionManagementError::BlockDistanceTooLarge) = result { + debug!(target: LOG_TARGET, "[follow][id={:?}] All subscriptions are stopped", sub_id); + reserved_subscription.stop_all_subscriptions(); + } debug!(target: LOG_TARGET, "[follow][id={:?}] Subscription removed", sub_id); }; diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs index 90cc62a36fa..0d87a45c07e 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs @@ -41,12 +41,14 @@ use sp_api::CallApiAt; use sp_blockchain::{ Backend as BlockChainBackend, Error as BlockChainError, HeaderBackend, HeaderMetadata, Info, }; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; +use sp_runtime::{ + traits::{Block as BlockT, Header as HeaderT, NumberFor}, + SaturatedConversion, Saturating, +}; use std::{ collections::{HashSet, VecDeque}, sync::Arc, }; - /// The maximum number of finalized blocks provided by the /// `Initialized` event. 
const MAX_FINALIZED_BLOCKS: usize = 16; @@ -67,6 +69,9 @@ pub struct ChainHeadFollower<BE: Backend<Block>, Block: BlockT, Client> { sub_id: String, /// The best reported block by this subscription. best_block_cache: Option<Block::Hash>, + /// Stop all subscriptions if the distance between the leaves and the current finalized + /// block is larger than this value. + max_lagging_distance: usize, } impl<BE: Backend<Block>, Block: BlockT, Client> ChainHeadFollower<BE, Block, Client> { @@ -77,8 +82,17 @@ impl<BE: Backend<Block>, Block: BlockT, Client> ChainHeadFollower<BE, Block, Client> pub fn new( client: Arc<Client>, backend: Arc<BE>, sub_handle: SubscriptionManagement<Block, BE>, with_runtime: bool, sub_id: String, + max_lagging_distance: usize, ) -> Self { - Self { client, backend, sub_handle, with_runtime, sub_id, best_block_cache: None } + Self { + client, + backend, + sub_handle, + with_runtime, + sub_id, + best_block_cache: None, + max_lagging_distance, + } } } @@ -186,6 +200,35 @@ where } } + /// Check that the distance between the provided blocks does not exceed a + /// reasonable range. + /// + /// When the blocks are too far apart (potentially millions of blocks): + /// - Tree route is expensive to calculate. + /// - The RPC layer will not be able to generate the `NewBlock` events for all blocks. + /// + /// This edge case can happen for parachains where the relay chain node syncs to the + /// head of the chain more slowly than a parachain node that is already synced. + fn distance_within_reason( + &self, + block: Block::Hash, + finalized: Block::Hash, + ) -> Result<(), SubscriptionManagementError> { + let Some(block_num) = self.client.number(block)? else { + return Err(SubscriptionManagementError::BlockHashAbsent) + }; + let Some(finalized_num) = self.client.number(finalized)? else { + return Err(SubscriptionManagementError::BlockHashAbsent) + }; + + let distance: usize = block_num.saturating_sub(finalized_num).saturated_into(); + if distance > self.max_lagging_distance { + return Err(SubscriptionManagementError::BlockDistanceTooLarge); + } + + Ok(()) + } + /// Get the in-memory blocks of the client, starting from the provided finalized hash. /// /// The reported blocks are pinned by this function. @@ -198,6 +241,13 @@ where let mut pruned_forks = HashSet::new(); let mut finalized_block_descendants = Vec::new(); let mut unique_descendants = HashSet::new(); + + // Ensure all leaves are within a reasonable distance from the finalized block, + // before traversing the tree. + for leaf in &leaves { + self.distance_within_reason(*leaf, finalized)?; + } + for leaf in leaves { let tree_route = sp_blockchain::tree_route(blockchain, finalized, leaf)?; @@ -542,7 +592,8 @@ where mut to_ignore: HashSet<Block::Hash>, sink: SubscriptionSink, rx_stop: oneshot::Receiver<()>, - ) where + ) -> Result<(), SubscriptionManagementError> + where EventStream: Stream<Item = NotificationType<Block>> + Unpin, { let mut stream_item = stream.next(); @@ -576,7 +627,7 @@ where ); let msg = to_sub_message(&sink, &FollowEvent::<String>::Stop); let _ = sink.send(msg).await; - return + return Err(err) }, }; @@ -591,7 +642,8 @@ where let msg = to_sub_message(&sink, &FollowEvent::<String>::Stop); let _ = sink.send(msg).await; - return + // No need to propagate this error further, the client disconnected. + return Ok(()) } } @@ -605,6 +657,7 @@ where // - the client disconnected. let msg = to_sub_message(&sink, &FollowEvent::<String>::Stop); let _ = sink.send(msg).await; + Ok(()) } /// Generate the block events for the `chainHead_follow` method. @@ -612,7 +665,7 @@ where &mut self, sink: SubscriptionSink, sub_data: InsertedSubscriptionData<Block>, - ) { + ) -> Result<(), SubscriptionManagementError> { // Register for the new block and finalized notifications.
let stream_import = self .client @@ -640,7 +693,7 @@ where ); let msg = to_sub_message(&sink, &FollowEvent::::Stop); let _ = sink.send(msg).await; - return + return Err(err) }, }; @@ -650,6 +703,6 @@ where let stream = stream::once(futures::future::ready(initial)).chain(merged); self.submit_events(&startup_point, stream.boxed(), pruned_forks, sink, sub_data.rx_stop) - .await; + .await } } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/error.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/error.rs index 2c22e51ca4d..91ce26db22a 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/error.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/error.rs @@ -41,6 +41,9 @@ pub enum SubscriptionManagementError { /// The unpin method was called with duplicate hashes. #[error("Duplicate hashes")] DuplicateHashes, + /// The distance between the leaves and the current finalized block is too large. + #[error("Distance too large")] + BlockDistanceTooLarge, /// Custom error. #[error("Subscription error {0}")] Custom(String), @@ -57,6 +60,7 @@ impl PartialEq for SubscriptionManagementError { (Self::BlockHeaderAbsent, Self::BlockHeaderAbsent) | (Self::SubscriptionAbsent, Self::SubscriptionAbsent) | (Self::DuplicateHashes, Self::DuplicateHashes) => true, + (Self::BlockDistanceTooLarge, Self::BlockDistanceTooLarge) => true, (Self::Custom(lhs), Self::Custom(rhs)) => lhs == rhs, _ => false, } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs index 1ebee3c80fc..0e5ccb91d39 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs @@ -560,6 +560,7 @@ pub struct SubscriptionsInner> { max_ongoing_operations: usize, /// Map the subscription ID to internal details of the subscription. subs: HashMap>, + /// Backend pinning / unpinning blocks. /// /// The `Arc` is handled one level-above, but substrate exposes the backend as Arc. @@ -623,6 +624,15 @@ impl> SubscriptionsInner { } } + /// All active subscriptions are removed. + pub fn stop_all_subscriptions(&mut self) { + let to_remove: Vec<_> = self.subs.keys().map(|sub_id| sub_id.clone()).collect(); + + for sub_id in to_remove { + self.remove_subscription(&sub_id); + } + } + /// Ensure that a new block could be pinned. 
/// /// If the global number of blocks has been reached this method @@ -878,6 +888,30 @@ mod tests { (backend, client) } + fn produce_blocks( + mut client: Arc>>, + num_blocks: usize, + ) -> Vec<::Hash> { + let mut blocks = Vec::with_capacity(num_blocks); + let mut parent_hash = client.chain_info().genesis_hash; + + for i in 0..num_blocks { + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(parent_hash) + .with_parent_block_number(i as u64) + .build() + .unwrap() + .build() + .unwrap() + .block; + parent_hash = block.header.hash(); + futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + blocks.push(block.header.hash()); + } + + blocks + } + #[test] fn block_state_machine_register_unpin() { let mut state = BlockStateMachine::new(); @@ -1003,37 +1037,10 @@ mod tests { #[test] fn unpin_duplicate_hashes() { - let (backend, mut client) = init_backend(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(client.chain_info().genesis_hash) - .with_parent_block_number(0) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_1 = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(hash_1) - .with_parent_block_number(1) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_2 = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(hash_2) - .with_parent_block_number(2) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_3 = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + let (backend, client) = init_backend(); + + let hashes = produce_blocks(client, 3); + let (hash_1, hash_2, hash_3) = (hashes[0], hashes[1], hashes[2]); let mut subs = SubscriptionsInner::new(10, Duration::from_secs(10), MAX_OPERATIONS_PER_SUB, backend); @@ -1102,18 +1109,10 @@ mod tests { #[test] fn subscription_check_block() { - let (backend, mut client) = init_backend(); - - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(client.chain_info().genesis_hash) - .with_parent_block_number(0) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash = block.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + let (backend, client) = init_backend(); + + let hashes = produce_blocks(client, 1); + let hash = hashes[0]; let mut subs = SubscriptionsInner::new(10, Duration::from_secs(10), MAX_OPERATIONS_PER_SUB, backend); @@ -1140,17 +1139,10 @@ mod tests { #[test] fn subscription_ref_count() { - let (backend, mut client) = init_backend(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(client.chain_info().genesis_hash) - .with_parent_block_number(0) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + let (backend, client) = init_backend(); + + let hashes = produce_blocks(client, 1); + let hash = hashes[0]; let mut subs = SubscriptionsInner::new(10, Duration::from_secs(10), MAX_OPERATIONS_PER_SUB, backend); @@ -1190,37 +1182,10 @@ mod tests { #[test] fn subscription_remove_subscription() { - let (backend, mut client) = init_backend(); - let block = BlockBuilderBuilder::new(&*client) - 
.on_parent_block(client.chain_info().genesis_hash) - .with_parent_block_number(0) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_1 = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(hash_1) - .with_parent_block_number(1) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_2 = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(hash_2) - .with_parent_block_number(2) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_3 = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + let (backend, client) = init_backend(); + + let hashes = produce_blocks(client, 3); + let (hash_1, hash_2, hash_3) = (hashes[0], hashes[1], hashes[2]); let mut subs = SubscriptionsInner::new(10, Duration::from_secs(10), MAX_OPERATIONS_PER_SUB, backend); @@ -1256,37 +1221,10 @@ mod tests { #[test] fn subscription_check_limits() { - let (backend, mut client) = init_backend(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(client.chain_info().genesis_hash) - .with_parent_block_number(0) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_1 = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(hash_1) - .with_parent_block_number(1) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_2 = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(hash_2) - .with_parent_block_number(2) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_3 = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + let (backend, client) = init_backend(); + + let hashes = produce_blocks(client, 3); + let (hash_1, hash_2, hash_3) = (hashes[0], hashes[1], hashes[2]); // Maximum number of pinned blocks is 2. 
let mut subs = @@ -1328,37 +1266,10 @@ mod tests { #[test] fn subscription_check_limits_with_duration() { - let (backend, mut client) = init_backend(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(client.chain_info().genesis_hash) - .with_parent_block_number(0) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_1 = block.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(hash_1) - .with_parent_block_number(1) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_2 = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(hash_2) - .with_parent_block_number(2) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_3 = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + let (backend, client) = init_backend(); + + let hashes = produce_blocks(client, 3); + let (hash_1, hash_2, hash_3) = (hashes[0], hashes[1], hashes[2]); // Maximum number of pinned blocks is 2 and maximum pin duration is 5 second. let mut subs = @@ -1456,6 +1367,39 @@ mod tests { assert_eq!(permit_three.num_ops, 1); } + #[test] + fn stop_all_subscriptions() { + let (backend, client) = init_backend(); + + let hashes = produce_blocks(client, 3); + let (hash_1, hash_2, hash_3) = (hashes[0], hashes[1], hashes[2]); + + let mut subs = + SubscriptionsInner::new(10, Duration::from_secs(10), MAX_OPERATIONS_PER_SUB, backend); + let id_1 = "abc".to_string(); + let id_2 = "abcd".to_string(); + + // Pin all blocks for the first subscription. + let _stop = subs.insert_subscription(id_1.clone(), true).unwrap(); + assert_eq!(subs.pin_block(&id_1, hash_1).unwrap(), true); + assert_eq!(subs.pin_block(&id_1, hash_2).unwrap(), true); + assert_eq!(subs.pin_block(&id_1, hash_3).unwrap(), true); + + // Pin only block 2 for the second subscription. + let _stop = subs.insert_subscription(id_2.clone(), true).unwrap(); + assert_eq!(subs.pin_block(&id_2, hash_2).unwrap(), true); + + // Check reference count. + assert_eq!(*subs.global_blocks.get(&hash_1).unwrap(), 1); + assert_eq!(*subs.global_blocks.get(&hash_2).unwrap(), 2); + assert_eq!(*subs.global_blocks.get(&hash_3).unwrap(), 1); + assert_eq!(subs.global_blocks.len(), 3); + + // Stop all active subscriptions. + subs.stop_all_subscriptions(); + assert!(subs.global_blocks.is_empty()); + } + #[test] fn reserved_subscription_cleans_resources() { let builder = TestClientBuilder::new(); diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs index 5b016af1aa4..f266c9d8b34 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs @@ -233,6 +233,15 @@ impl> ReservedSubscription { }, } } + + /// Stop all active subscriptions. + /// + /// For all active subscriptions, the internal data is discarded, blocks are unpinned and the + /// `Stop` event will be generated. 
+ pub fn stop_all_subscriptions(&self) { + let mut inner = self.inner.write(); + inner.stop_all_subscriptions() + } } impl> Drop for ReservedSubscription { diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index c3f10a201c5..c2bff7c50d5 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -63,6 +63,7 @@ const MAX_PINNED_BLOCKS: usize = 32; const MAX_PINNED_SECS: u64 = 60; const MAX_OPERATIONS: usize = 16; const MAX_PAGINATION_LIMIT: usize = 5; +const MAX_LAGGING_DISTANCE: usize = 128; const MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION: usize = 4; const INVALID_HASH: [u8; 32] = [1; 32]; @@ -88,6 +89,7 @@ pub async fn run_server() -> std::net::SocketAddr { subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, max_follow_subscriptions_per_connection: 1, + max_lagging_distance: MAX_LAGGING_DISTANCE, }, ) .into_rpc(); @@ -148,6 +150,7 @@ async fn setup_api() -> ( subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) @@ -199,6 +202,8 @@ async fn follow_subscription_produces_blocks() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) @@ -268,6 +273,8 @@ async fn follow_with_runtime() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) @@ -581,6 +588,8 @@ async fn call_runtime_without_flag() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) @@ -1240,6 +1249,8 @@ async fn separate_operation_ids_for_subscriptions() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) @@ -1329,6 +1340,8 @@ async fn follow_generates_initial_blocks() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) @@ -1485,6 +1498,8 @@ async fn follow_exceeding_pinned_blocks() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, 
max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) @@ -1562,6 +1577,8 @@ async fn follow_with_unpin() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) @@ -1674,6 +1691,8 @@ async fn unpin_duplicate_hashes() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) @@ -1777,6 +1796,8 @@ async fn follow_with_multiple_unpin_hashes() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) @@ -1931,6 +1952,8 @@ async fn follow_prune_best_block() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) @@ -2117,6 +2140,8 @@ async fn follow_forks_pruned_block() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) @@ -2277,6 +2302,8 @@ async fn follow_report_multiple_pruned_block() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) @@ -2523,6 +2550,8 @@ async fn pin_block_references() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) @@ -2661,6 +2690,8 @@ async fn follow_finalized_before_new_block() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) @@ -2776,6 +2807,8 @@ async fn ensure_operation_limits_works() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: 1, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) @@ -2881,6 +2914,8 @@ async fn check_continue_operation() { subscription_max_pinned_duration: 
Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: 1, + + max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) @@ -3064,6 +3099,8 @@ async fn stop_storage_operation() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: 1, + + max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) @@ -3351,6 +3388,88 @@ async fn storage_closest_merkle_value() { ); } +#[tokio::test] +async fn chain_head_stop_all_subscriptions() { + let builder = TestClientBuilder::new(); + let backend = builder.backend(); + let mut client = Arc::new(builder.build()); + + // Configure the chainHead to stop all subscriptions on lagging distance of 5 blocks. + let api = ChainHead::new( + client.clone(), + backend, + Arc::new(TaskExecutor::default()), + ChainHeadConfig { + global_max_pinned_blocks: MAX_PINNED_BLOCKS, + subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), + subscription_max_ongoing_operations: MAX_OPERATIONS, + operation_max_storage_items: MAX_PAGINATION_LIMIT, + max_lagging_distance: 5, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + }, + ) + .into_rpc(); + + let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + + // Ensure the imported block is propagated and pinned for this subscription. + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::Initialized(_) + ); + + // Import 6 blocks in total to trigger the suspension distance. + let mut parent_hash = client.chain_info().genesis_hash; + for i in 0..6 { + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(parent_hash) + .with_parent_block_number(i) + .build() + .unwrap() + .build() + .unwrap() + .block; + + let hash = block.hash(); + parent_hash = hash; + client.import(BlockOrigin::Own, block.clone()).await.unwrap(); + + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::NewBlock(_) + ); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::BestBlockChanged(_) + ); + } + + let mut second_sub = + api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + // Lagging detected, the stop event is delivered immediately. + assert_matches!( + get_next_event::>(&mut second_sub).await, + FollowEvent::Stop + ); + + // Ensure that all subscriptions are stopped. + assert_matches!(get_next_event::>(&mut sub).await, FollowEvent::Stop); + + // Other subscriptions cannot be started until the suspension period is over. + let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + // Should receive the stop event immediately. + assert_matches!(get_next_event::>(&mut sub).await, FollowEvent::Stop); + + // For the next subscription, lagging distance must be smaller. 
+ client.finalize_block(parent_hash, None).unwrap(); + + let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::Initialized(_) + ); +} + #[tokio::test] async fn chain_head_single_connection_context() { let server_addr = run_server().await; @@ -3500,12 +3619,14 @@ async fn chain_head_limit_reached() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: 1, }, ) .into_rpc(); let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + // Initialized must always be reported first. let _event: FollowEvent = get_next_event(&mut sub).await; -- GitLab From ecc51a25350145e3848f306cb30d92423b78d974 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Wed, 3 Apr 2024 17:00:38 +0200 Subject: [PATCH 093/128] Enable pov-reclaim on rococo and westend parachains (#3858) Enables pov-reclaim on the rococo/westend parachains, part of https://github.com/paritytech/polkadot-sdk/issues/3622 --- Cargo.lock | 10 ++++++++++ .../runtimes/assets/asset-hub-rococo/Cargo.toml | 2 ++ .../runtimes/assets/asset-hub-rococo/src/lib.rs | 1 + .../runtimes/assets/asset-hub-westend/Cargo.toml | 2 ++ .../runtimes/assets/asset-hub-westend/src/lib.rs | 1 + .../runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml | 2 ++ .../runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs | 4 +++- .../bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs | 1 + .../bridge-hubs/bridge-hub-rococo/tests/tests.rs | 1 + .../runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml | 3 +++ .../runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs | 2 ++ .../bridge-hubs/bridge-hub-westend/tests/tests.rs | 1 + .../collectives/collectives-westend/Cargo.toml | 3 +++ .../collectives/collectives-westend/src/lib.rs | 1 + .../runtimes/contracts/contracts-rococo/Cargo.toml | 3 +++ .../runtimes/contracts/contracts-rococo/src/lib.rs | 1 + .../runtimes/coretime/coretime-rococo/Cargo.toml | 2 ++ .../runtimes/coretime/coretime-rococo/src/lib.rs | 1 + .../runtimes/coretime/coretime-westend/Cargo.toml | 3 +++ .../runtimes/coretime/coretime-westend/src/lib.rs | 1 + .../runtimes/people/people-rococo/Cargo.toml | 2 ++ .../runtimes/people/people-rococo/src/lib.rs | 1 + .../runtimes/people/people-westend/Cargo.toml | 2 ++ .../runtimes/people/people-westend/src/lib.rs | 1 + 24 files changed, 50 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 55ef63fa2bf..42524fc7765 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -867,6 +867,7 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "cumulus-primitives-aura", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", "frame-benchmarking", "frame-executive", @@ -990,6 +991,7 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "cumulus-primitives-aura", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", "frame-benchmarking", "frame-executive", @@ -2010,6 +2012,7 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "cumulus-primitives-aura", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", "frame-benchmarking", "frame-executive", @@ -2181,6 +2184,7 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "cumulus-primitives-aura", 
"cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", "frame-benchmarking", "frame-executive", @@ -2739,6 +2743,7 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "cumulus-primitives-aura", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", "frame-benchmarking", "frame-executive", @@ -2994,6 +2999,7 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "cumulus-primitives-aura", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", "frame-benchmarking", "frame-executive", @@ -3088,6 +3094,7 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "cumulus-primitives-aura", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", "frame-benchmarking", "frame-executive", @@ -3152,6 +3159,7 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "cumulus-primitives-aura", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", "frame-benchmarking", "frame-executive", @@ -11764,6 +11772,7 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "cumulus-primitives-aura", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", "enumflags2", "frame-benchmarking", @@ -11863,6 +11872,7 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "cumulus-primitives-aura", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", "enumflags2", "frame-benchmarking", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml index f5ea0937dec..0733156716c 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml @@ -78,6 +78,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } @@ -190,6 +191,7 @@ std = [ "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-aura/std", "cumulus-primitives-core/std", + "cumulus-primitives-storage-weight-reclaim/std", "cumulus-primitives-utility/std", "frame-benchmarking?/std", "frame-executive/std", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 293416ab2a9..7edec45abfb 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -955,6 +955,7 @@ pub type SignedExtra = ( frame_system::CheckNonce, frame_system::CheckWeight, pallet_asset_conversion_tx_payment::ChargeAssetTxPayment, + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as 
expected by this runtime. pub type UncheckedExtrinsic = diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml index b792d64c03e..e25554ec0a5 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml @@ -76,6 +76,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } @@ -178,6 +179,7 @@ std = [ "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-aura/std", "cumulus-primitives-core/std", + "cumulus-primitives-storage-weight-reclaim/std", "cumulus-primitives-utility/std", "frame-benchmarking?/std", "frame-executive/std", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index e92e801e9f5..d17d5a70757 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -927,6 +927,7 @@ pub type SignedExtra = ( frame_system::CheckNonce, frame_system::CheckWeight, pallet_asset_conversion_tx_payment::ChargeAssetTxPayment, + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as expected by this runtime. 
pub type UncheckedExtrinsic = diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index 1dd4f499b4d..f5a75aa03ac 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -78,6 +78,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f ] } cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } @@ -156,6 +157,7 @@ std = [ "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-aura/std", "cumulus-primitives-core/std", + "cumulus-primitives-storage-weight-reclaim/std", "cumulus-primitives-utility/std", "frame-benchmarking/std", "frame-executive/std", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index fd6d44ec275..9796a77f994 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -130,6 +130,7 @@ pub type SignedExtra = ( bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages, bridge_to_bulletin_config::OnBridgeHubRococoRefundRococoBulletinMessages, ), + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as expected by this runtime. 
@@ -1493,7 +1494,8 @@ mod tests { ( bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(), bridge_to_bulletin_config::OnBridgeHubRococoRefundRococoBulletinMessages::default(), - ) + ), + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), ); // for BridgeHubRococo diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs index 101b8d86d55..5960ab7b550 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs @@ -187,6 +187,7 @@ fn construct_extrinsic( OnBridgeHubRococoRefundBridgeHubWestendMessages::default(), OnBridgeHubRococoRefundRococoBulletinMessages::default(), ), + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), ); let payload = SignedPayload::new(call.clone(), extra.clone()).unwrap(); let signature = payload.using_encoded(|e| sender.sign(e)); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs index fad357b0951..776c505fa64 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs @@ -64,6 +64,7 @@ fn construct_extrinsic( bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(), bridge_to_bulletin_config::OnBridgeHubRococoRefundRococoBulletinMessages::default(), ), + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), ); let payload = SignedPayload::new(call.clone(), extra.clone()).unwrap(); let signature = payload.using_encoded(|e| sender.sign(e)); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml index 1501ed12e3a..86560caca99 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml @@ -71,6 +71,8 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } + pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } @@ -128,6 +130,7 @@ std = [ "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-aura/std", "cumulus-primitives-core/std", + "cumulus-primitives-storage-weight-reclaim/std", "cumulus-primitives-utility/std", "frame-benchmarking/std", "frame-executive/std", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 3b759301d0e..4318df8f15e 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ 
b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -109,6 +109,7 @@ pub type SignedExtra = ( pallet_transaction_payment::ChargeTransactionPayment, BridgeRejectObsoleteHeadersAndMessages, (bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages,), + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as expected by this runtime. @@ -1154,6 +1155,7 @@ mod tests { ( bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages::default(), ), + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new() ); { diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs index 235b7f146c8..988b10e1e2d 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs @@ -78,6 +78,7 @@ fn construct_extrinsic( pallet_transaction_payment::ChargeTransactionPayment::::from(0), BridgeRejectObsoleteHeadersAndMessages::default(), (bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages::default(),), + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), ); let payload = SignedPayload::new(call.clone(), extra.clone()).unwrap(); let signature = payload.using_encoded(|e| sender.sign(e)); diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml index 9c3acf6ad93..22821170a54 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml @@ -77,6 +77,8 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } + pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } pallet-collective-content = { path = "../../../pallets/collective-content", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } @@ -172,6 +174,7 @@ std = [ "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-aura/std", "cumulus-primitives-core/std", + "cumulus-primitives-storage-weight-reclaim/std", "cumulus-primitives-utility/std", "frame-benchmarking?/std", "frame-executive/std", diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index e1c2e1a6237..170b0a39600 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -713,6 +713,7 @@ pub type SignedExtra = ( frame_system::CheckEra, frame_system::CheckNonce, frame_system::CheckWeight, + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as expected by this runtime. 
pub type UncheckedExtrinsic = diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml index a0aeb642df0..74c5b5f8115 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml @@ -74,6 +74,8 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } + pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } @@ -90,6 +92,7 @@ std = [ "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-aura/std", "cumulus-primitives-core/std", + "cumulus-primitives-storage-weight-reclaim/std", "cumulus-primitives-utility/std", "frame-benchmarking?/std", "frame-executive/std", diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index ec0a5f6fc96..936d87a23ca 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -90,6 +90,7 @@ pub type SignedExtra = ( frame_system::CheckNonce, frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as expected by this runtime. 
pub type UncheckedExtrinsic = diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml index eb92afc4311..ee9f5e87ec8 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml @@ -72,6 +72,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } @@ -88,6 +89,7 @@ std = [ "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-aura/std", "cumulus-primitives-core/std", + "cumulus-primitives-storage-weight-reclaim/std", "cumulus-primitives-utility/std", "frame-benchmarking?/std", "frame-executive/std", diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index 67f48689353..27965aa204f 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -99,6 +99,7 @@ pub type SignedExtra = ( frame_system::CheckNonce, frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as expected by this runtime. 
diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml index b8efecffc50..60cc7e2f765 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml @@ -71,6 +71,8 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } + pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } @@ -87,6 +89,7 @@ std = [ "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-aura/std", "cumulus-primitives-core/std", + "cumulus-primitives-storage-weight-reclaim/std", "cumulus-primitives-utility/std", "frame-benchmarking?/std", "frame-executive/std", diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 609ea5a38a8..8075ffb4f1c 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -99,6 +99,7 @@ pub type SignedExtra = ( frame_system::CheckNonce, frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as expected by this runtime. 
diff --git a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml index eebd662c3fd..7183be5fc82 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml @@ -69,6 +69,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } @@ -85,6 +86,7 @@ std = [ "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-aura/std", "cumulus-primitives-core/std", + "cumulus-primitives-storage-weight-reclaim/std", "cumulus-primitives-utility/std", "enumflags2/std", "frame-benchmarking?/std", diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index 7c9427a2493..1b6499f5d61 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -93,6 +93,7 @@ pub type SignedExtra = ( frame_system::CheckNonce, frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as expected by this runtime. 
diff --git a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml index 39cb69e679c..576c3b1aa4e 100644 --- a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml @@ -69,6 +69,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } @@ -85,6 +86,7 @@ std = [ "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-aura/std", "cumulus-primitives-core/std", + "cumulus-primitives-storage-weight-reclaim/std", "cumulus-primitives-utility/std", "enumflags2/std", "frame-benchmarking?/std", diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 3e331e5e8eb..6ae53d641b0 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -93,6 +93,7 @@ pub type SignedExtra = ( frame_system::CheckNonce, frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as expected by this runtime. -- GitLab From 0f4e849e0ac2de8c9880077c085985c5f656329c Mon Sep 17 00:00:00 2001 From: Andrei Sandu <54316454+sandreim@users.noreply.github.com> Date: Wed, 3 Apr 2024 18:01:34 +0300 Subject: [PATCH 094/128] Add ClaimQueue wrapper (#3950) Remove `fetch_next_scheduled_on_core` in favor of new wrapper and methods for accessing it. 
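
For illustration, reading claims through the new wrapper could look roughly like
the sketch below. This is not part of the diff: the `inspect_claims` helper is
made up for the example, and it assumes `polkadot-primitives` and
`polkadot-node-subsystem-util` (which exposes `ClaimQueueSnapshot` from its
`vstaging` module) are available as dependencies.

    use std::collections::{BTreeMap, VecDeque};

    use polkadot_node_subsystem_util::vstaging::ClaimQueueSnapshot;
    use polkadot_primitives::{CoreIndex, Id as ParaId};

    // Hypothetical helper: builds a snapshot from a raw runtime API result and
    // inspects it with the two new accessors.
    fn inspect_claims() {
        let raw: BTreeMap<CoreIndex, VecDeque<ParaId>> =
            BTreeMap::from([(CoreIndex(0), VecDeque::from([ParaId::from(100)]))]);
        let snapshot: ClaimQueueSnapshot = raw.into();

        // Depth 0 is the claim for the very next block on the core.
        if let Some(para_id) = snapshot.get_claim_for(CoreIndex(0), 0) {
            println!("core 0 is claimed next by {:?}", para_id);
        }

        // Iterate every core's claim at a given depth.
        for (core, para) in snapshot.iter_claims_at_depth(0) {
            println!("core {:?} -> para {:?}", core, para);
        }
    }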
--------- Signed-off-by: Andrei Sandu --- polkadot/node/collation-generation/src/lib.rs | 14 +++--- .../node/collation-generation/src/tests.rs | 13 +++--- .../statement-distribution/src/v2/mod.rs | 4 +- polkadot/node/subsystem-util/src/vstaging.rs | 44 ++++++++++++------- prdoc/pr_3950.prdoc | 12 +++++ 5 files changed, 55 insertions(+), 32 deletions(-) create mode 100644 prdoc/pr_3950.prdoc diff --git a/polkadot/node/collation-generation/src/lib.rs b/polkadot/node/collation-generation/src/lib.rs index fb82871bb15..60ea1cf5ff4 100644 --- a/polkadot/node/collation-generation/src/lib.rs +++ b/polkadot/node/collation-generation/src/lib.rs @@ -45,13 +45,12 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_util::{ request_async_backing_params, request_availability_cores, request_para_backing_state, request_persisted_validation_data, request_validation_code, request_validation_code_hash, - request_validators, - vstaging::{fetch_claim_queue, fetch_next_scheduled_on_core}, + request_validators, vstaging::fetch_claim_queue, }; use polkadot_primitives::{ collator_signature_payload, CandidateCommitments, CandidateDescriptor, CandidateReceipt, CollatorPair, CoreIndex, CoreState, Hash, Id as ParaId, OccupiedCoreAssumption, - PersistedValidationData, ValidationCodeHash, + PersistedValidationData, ScheduledCore, ValidationCodeHash, }; use sp_core::crypto::Pair; use std::sync::Arc; @@ -245,11 +244,10 @@ async fn handle_new_activations( // Use claim queue if available, or fallback to `next_up_on_available` let res = match maybe_claim_queue { Some(ref claim_queue) => { - // read what's in the claim queue for this core - fetch_next_scheduled_on_core( - claim_queue, - CoreIndex(core_idx as u32), - ) + // read what's in the claim queue for this core at depth 0. + claim_queue + .get_claim_for(CoreIndex(core_idx as u32), 0) + .map(|para_id| ScheduledCore { para_id, collator: None }) }, None => { // Runtime doesn't support claim queue runtime api. 
Fallback to diff --git a/polkadot/node/collation-generation/src/tests.rs b/polkadot/node/collation-generation/src/tests.rs index 781d27188df..1ec2cccfae7 100644 --- a/polkadot/node/collation-generation/src/tests.rs +++ b/polkadot/node/collation-generation/src/tests.rs @@ -28,7 +28,7 @@ use polkadot_node_subsystem::{ ActivatedLeaf, }; use polkadot_node_subsystem_test_helpers::{subsystem_test_harness, TestSubsystemContextHandle}; -use polkadot_node_subsystem_util::{vstaging::ClaimQueueSnapshot, TimeoutExt}; +use polkadot_node_subsystem_util::TimeoutExt; use polkadot_primitives::{ async_backing::{BackingState, CandidatePendingAvailability}, AsyncBackingParams, BlockNumber, CollatorPair, HeadData, PersistedValidationData, @@ -620,8 +620,7 @@ fn fallback_when_no_validation_code_hash_api(#[case] runtime_version: u32) { _hash, RuntimeApiRequest::ClaimQueue(tx), ))) if runtime_version >= RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT => { - let res = ClaimQueueSnapshot::new(); - tx.send(Ok(res)).unwrap(); + tx.send(Ok(Default::default())).unwrap(); }, Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( _hash, @@ -783,7 +782,7 @@ fn distribute_collation_for_occupied_core_with_async_backing_enabled(#[case] run candidate_hash: Default::default(), candidate_descriptor: dummy_candidate_descriptor(dummy_hash()), })]; - let claim_queue = ClaimQueueSnapshot::from([(CoreIndex::from(0), VecDeque::from([para_id]))]); + let claim_queue = BTreeMap::from([(CoreIndex::from(0), VecDeque::from([para_id]))]).into(); test_harness(|mut virtual_overseer| async move { helpers::initialize_collator(&mut virtual_overseer, para_id).await; @@ -965,7 +964,7 @@ fn no_collation_is_distributed_for_occupied_core_with_async_backing_disabled( candidate_hash: Default::default(), candidate_descriptor: dummy_candidate_descriptor(dummy_hash()), })]; - let claim_queue = ClaimQueueSnapshot::from([(CoreIndex::from(0), VecDeque::from([para_id]))]); + let claim_queue = BTreeMap::from([(CoreIndex::from(0), VecDeque::from([para_id]))]).into(); test_harness(|mut virtual_overseer| async move { helpers::initialize_collator(&mut virtual_overseer, para_id).await; @@ -1053,7 +1052,7 @@ mod helpers { async_backing_params: AsyncBackingParams, cores: Vec, runtime_version: u32, - claim_queue: ClaimQueueSnapshot, + claim_queue: BTreeMap>, ) { assert_matches!( overseer_recv(virtual_overseer).await, @@ -1107,7 +1106,7 @@ mod helpers { RuntimeApiRequest::ClaimQueue(tx), )) => { assert_eq!(hash, activated_hash); - let _ = tx.send(Ok(claim_queue)); + let _ = tx.send(Ok(claim_queue.into())); } ); } diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs index b9f6f705ed8..f5a8ec4a269 100644 --- a/polkadot/node/network/statement-distribution/src/v2/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -2163,8 +2163,8 @@ async fn determine_groups_per_para( // pending availability. 
 	let para_core_indices: Vec<_> = if let Some(claim_queue) = maybe_claim_queue {
 		claim_queue
-			.into_iter()
-			.filter_map(|(core_index, paras)| Some((*paras.front()?, core_index)))
+			.iter_claims_at_depth(0)
+			.map(|(core_index, para)| (para, core_index))
 			.collect()
 	} else {
 		availability_cores
diff --git a/polkadot/node/subsystem-util/src/vstaging.rs b/polkadot/node/subsystem-util/src/vstaging.rs
index 25ea7ce7c9b..b166a54f75c 100644
--- a/polkadot/node/subsystem-util/src/vstaging.rs
+++ b/polkadot/node/subsystem-util/src/vstaging.rs
@@ -23,14 +23,40 @@ use std::collections::{BTreeMap, VecDeque};
 
 use polkadot_node_subsystem_types::messages::{RuntimeApiMessage, RuntimeApiRequest};
 use polkadot_overseer::SubsystemSender;
-use polkadot_primitives::{CoreIndex, Hash, Id as ParaId, ScheduledCore, ValidatorIndex};
+use polkadot_primitives::{CoreIndex, Hash, Id as ParaId, ValidatorIndex};
 
 use crate::{has_required_runtime, request_claim_queue, request_disabled_validators, runtime};
 
 const LOG_TARGET: &'static str = "parachain::subsystem-util-vstaging";
 
 /// A snapshot of the runtime claim queue at an arbitrary relay chain block.
-pub type ClaimQueueSnapshot = BTreeMap<CoreIndex, VecDeque<ParaId>>;
+#[derive(Default)]
+pub struct ClaimQueueSnapshot(BTreeMap<CoreIndex, VecDeque<ParaId>>);
+
+impl From<BTreeMap<CoreIndex, VecDeque<ParaId>>> for ClaimQueueSnapshot {
+	fn from(claim_queue_snapshot: BTreeMap<CoreIndex, VecDeque<ParaId>>) -> Self {
+		ClaimQueueSnapshot(claim_queue_snapshot)
+	}
+}
+
+impl ClaimQueueSnapshot {
+	/// Returns the `ParaId` that has a claim for `core_index` at the specified `depth` in the
+	/// claim queue. A depth of `0` means the very next block.
+	pub fn get_claim_for(&self, core_index: CoreIndex, depth: usize) -> Option<ParaId> {
+		self.0.get(&core_index)?.get(depth).copied()
+	}
+
+	/// Returns an iterator over all claimed cores and the claiming `ParaId` at the specified
+	/// `depth` in the claim queue.
+	pub fn iter_claims_at_depth(
+		&self,
+		depth: usize,
+	) -> impl Iterator<Item = (CoreIndex, ParaId)> + '_ {
+		self.0
+			.iter()
+			.filter_map(move |(core_index, paras)| Some((*core_index, *paras.get(depth)?)))
+	}
+}
 
 // TODO: https://github.com/paritytech/polkadot-sdk/issues/1940
 /// Returns disabled validators list if the runtime supports it. Otherwise logs a debug messages and
@@ -78,21 +104,9 @@ pub async fn fetch_claim_queue(
 		.await
 		.await
 		.map_err(runtime::Error::RuntimeRequestCanceled)??;
-		Ok(Some(res))
+		Ok(Some(res.into()))
 	} else {
 		gum::trace!(target: LOG_TARGET, "Runtime doesn't support `request_claim_queue`");
 		Ok(None)
 	}
 }
-
-/// Returns the next scheduled `ParaId` for a core in the claim queue, wrapped in `ScheduledCore`.
-pub fn fetch_next_scheduled_on_core(
-	claim_queue: &ClaimQueueSnapshot,
-	core_idx: CoreIndex,
-) -> Option<ScheduledCore> {
-	claim_queue
-		.get(&core_idx)?
-		.front()
-		.cloned()
-		.map(|para_id| ScheduledCore { para_id, collator: None })
-}
diff --git a/prdoc/pr_3950.prdoc b/prdoc/pr_3950.prdoc
new file mode 100644
index 00000000000..a333521898b
--- /dev/null
+++ b/prdoc/pr_3950.prdoc
@@ -0,0 +1,12 @@
+title: Add `ClaimQueue` wrapper
+
+doc:
+  - audience: Node Dev
+    description: |
+      Introduces a new wrapper type: `ClaimQueueSnapshot`. It contains a snapshot of the `ClaimQueue`
+      at an arbitrary relay chain block. Two methods are exposed to allow access to the claims at
+      specific depths.
+ +crates: + - name: polkadot-node-subsystem-util + bump: minor -- GitLab From 3836376965104d7723a1659d52ee26232019b929 Mon Sep 17 00:00:00 2001 From: gupnik Date: Thu, 4 Apr 2024 07:50:15 +0530 Subject: [PATCH 095/128] Renames `frame` crate to `polkadot-sdk-frame` (#3813) Step in https://github.com/paritytech/polkadot-sdk/issues/3155 Needed for https://github.com/paritytech/eng-automation/issues/6 This PR renames `frame` crate to `polkadot-sdk-frame` as `frame` is not available on crates.io --------- Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> --- Cargo.lock | 70 +++++++++---------- docs/sdk/Cargo.toml | 2 +- prdoc/pr_3813.prdoc | 14 ++++ substrate/client/chain-spec/src/lib.rs | 2 +- substrate/frame/Cargo.toml | 4 +- .../frame/examples/frame-crate/Cargo.toml | 2 +- substrate/frame/src/lib.rs | 24 ++++--- substrate/frame/support/procedural/src/lib.rs | 8 +-- .../procedural/src/pallet/parse/config.rs | 42 ++++++++--- .../frame/support/procedural/tools/src/lib.rs | 16 +++-- .../support/test/stg_frame_crate/Cargo.toml | 2 +- ...event_type_invalid_bound_no_frame_crate.rs | 6 +- ...t_type_invalid_bound_no_frame_crate.stderr | 2 +- .../primitives/api/proc-macro/src/utils.rs | 4 +- .../benchmarking-cli/src/pallet/command.rs | 2 +- substrate/utils/frame/rpc/support/src/lib.rs | 2 +- templates/minimal/node/Cargo.toml | 2 +- templates/minimal/pallets/template/Cargo.toml | 2 +- templates/minimal/runtime/Cargo.toml | 2 +- 19 files changed, 130 insertions(+), 78 deletions(-) create mode 100644 prdoc/pr_3813.prdoc diff --git a/Cargo.lock b/Cargo.lock index 42524fc7765..fc340ff1119 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5454,35 +5454,6 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" -[[package]] -name = "frame" -version = "0.0.1-dev" -dependencies = [ - "docify", - "frame-executive", - "frame-support", - "frame-system", - "frame-system-rpc-runtime-api", - "log", - "pallet-examples", - "parity-scale-codec", - "scale-info", - "sp-api", - "sp-arithmetic", - "sp-block-builder", - "sp-consensus-aura", - "sp-consensus-grandpa", - "sp-core", - "sp-inherents", - "sp-io", - "sp-offchain", - "sp-runtime", - "sp-session", - "sp-std 14.0.0", - "sp-transaction-pool", - "sp-version", -] - [[package]] name = "frame-benchmarking" version = "28.0.0" @@ -5820,8 +5791,8 @@ dependencies = [ name = "frame-support-test-stg-frame-crate" version = "0.1.0" dependencies = [ - "frame", "parity-scale-codec", + "polkadot-sdk-frame", "scale-info", ] @@ -8132,11 +8103,11 @@ name = "minimal-template-node" version = "0.0.0" dependencies = [ "clap 4.5.3", - "frame", "futures", "futures-timer", "jsonrpsee", "minimal-template-runtime", + "polkadot-sdk-frame", "sc-basic-authorship", "sc-cli", "sc-client-api", @@ -8166,7 +8137,6 @@ dependencies = [ name = "minimal-template-runtime" version = "0.0.0" dependencies = [ - "frame", "pallet-balances", "pallet-minimal-template", "pallet-sudo", @@ -8174,6 +8144,7 @@ dependencies = [ "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "parity-scale-codec", + "polkadot-sdk-frame", "scale-info", "sp-genesis-builder", "substrate-wasm-builder", @@ -9844,8 +9815,8 @@ dependencies = [ name = "pallet-example-frame-crate" version = "0.0.1" dependencies = [ - "frame", "parity-scale-codec", + "polkadot-sdk-frame", "scale-info", ] @@ -10178,8 +10149,8 @@ dependencies = [ name = "pallet-minimal-template" version = "0.0.0" dependencies = [ 
- "frame", "parity-scale-codec", + "polkadot-sdk-frame", "scale-info", ] @@ -13377,7 +13348,6 @@ dependencies = [ "cumulus-pallet-aura-ext", "cumulus-pallet-parachain-system", "docify", - "frame", "frame-executive", "frame-support", "frame-system", @@ -13405,6 +13375,7 @@ dependencies = [ "pallet-uniques", "pallet-utility", "parity-scale-codec", + "polkadot-sdk-frame", "sc-cli", "sc-client-db", "sc-consensus-aura", @@ -13434,6 +13405,35 @@ dependencies = [ "substrate-wasm-builder", ] +[[package]] +name = "polkadot-sdk-frame" +version = "0.1.0" +dependencies = [ + "docify", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-rpc-runtime-api", + "log", + "pallet-examples", + "parity-scale-codec", + "scale-info", + "sp-api", + "sp-arithmetic", + "sp-block-builder", + "sp-consensus-aura", + "sp-consensus-grandpa", + "sp-core", + "sp-inherents", + "sp-io", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-std 14.0.0", + "sp-transaction-pool", + "sp-version", +] + [[package]] name = "polkadot-service" version = "7.0.0" diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml index 64b23866f0c..426c5d9de4a 100644 --- a/docs/sdk/Cargo.toml +++ b/docs/sdk/Cargo.toml @@ -17,7 +17,7 @@ workspace = true # Needed for all FRAME-based code parity-scale-codec = { version = "3.0.0", default-features = false } scale-info = { version = "2.6.0", default-features = false } -frame = { path = "../../substrate/frame", features = [ +frame = { package = "polkadot-sdk-frame", path = "../../substrate/frame", features = [ "experimental", "runtime", ] } diff --git a/prdoc/pr_3813.prdoc b/prdoc/pr_3813.prdoc new file mode 100644 index 00000000000..66dfd70e1b1 --- /dev/null +++ b/prdoc/pr_3813.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Renames `frame` crate to `polkadot-sdk-frame` + +doc: + - audience: Runtime Dev + description: | + This PR renames `frame` crate to `polkadot-sdk-frame` as `frame` is not available on crates.io. + Please note that this crate can only be imported as `polkadot-sdk-frame` or `frame`. + +crates: + - name: polkadot-sdk-frame + bump: major diff --git a/substrate/client/chain-spec/src/lib.rs b/substrate/client/chain-spec/src/lib.rs index e8b87a60404..6da5fd26d52 100644 --- a/substrate/client/chain-spec/src/lib.rs +++ b/substrate/client/chain-spec/src/lib.rs @@ -140,7 +140,7 @@ //! A JSON object that provides an explicit and comprehensive representation of the //! RuntimeGenesisConfig struct, which is generated by frame::runtime::prelude::construct_runtime macro (polkadot_sdk_frame::runtime::prelude::construct_runtime macro (example of generated struct). Must contain all the keys of //! the genesis config, no defaults will be used. 
diff --git a/substrate/frame/Cargo.toml b/substrate/frame/Cargo.toml index 27001ee5afd..919d6d17ce8 100644 --- a/substrate/frame/Cargo.toml +++ b/substrate/frame/Cargo.toml @@ -1,6 +1,6 @@ [package] -name = "frame" -version = "0.0.1-dev" +name = "polkadot-sdk-frame" +version = "0.1.0" authors = ["Parity Technologies "] edition = "2021" license = "Apache-2.0" diff --git a/substrate/frame/examples/frame-crate/Cargo.toml b/substrate/frame/examples/frame-crate/Cargo.toml index 76bfd65282a..3a0e4f720f9 100644 --- a/substrate/frame/examples/frame-crate/Cargo.toml +++ b/substrate/frame/examples/frame-crate/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame = { path = "../..", default-features = false, features = ["experimental", "runtime"] } +frame = { package = "polkadot-sdk-frame", path = "../..", default-features = false, features = ["experimental", "runtime"] } [features] diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs index 52db7c34bfd..d395b4c1902 100644 --- a/substrate/frame/src/lib.rs +++ b/substrate/frame/src/lib.rs @@ -45,16 +45,20 @@ //! //! In short, this crate only re-exports types and traits from multiple sources. All of these //! sources are listed (and re-exported again) in [`deps`]. +//! +//! ## Usage +//! +//! Please note that this crate can only be imported as `polkadot-sdk-frame` or `frame`. #![cfg_attr(not(feature = "std"), no_std)] #![cfg(feature = "experimental")] /// Exports the main pallet macro. This can wrap a `mod pallet` and will transform it into -/// being a pallet, eg `#[frame::pallet] mod pallet { .. }`. +/// being a pallet, eg `#[polkadot_sdk_frame::pallet] mod pallet { .. }`. /// /// Note that this is not part of the prelude, in order to make it such that the common way to -/// define a macro is `#[frame::pallet] mod pallet { .. }`, followed by `#[pallet::foo]`, -/// `#[pallet::bar]` inside the mod. +/// define a macro is `#[polkadot_sdk_frame::pallet] mod pallet { .. }`, followed by +/// `#[pallet::foo]`, `#[pallet::bar]` inside the mod. pub use frame_support::pallet; pub use frame_support::pallet_macros::{import_section, pallet_section}; @@ -75,7 +79,7 @@ pub mod pallet_macros { /// This prelude should almost always be the first line of code in any pallet or runtime. /// /// ``` -/// use frame::prelude::*; +/// use polkadot_sdk_frame::prelude::*; /// /// // rest of your pallet.. /// mod pallet {} @@ -84,7 +88,7 @@ pub mod prelude { /// `frame_system`'s parent crate, which is mandatory in all pallets build with this crate. /// /// Conveniently, the keyword `frame_system` is in scope as one uses `use - /// frame::prelude::*` + /// polkadot_sdk_frame::prelude::*` #[doc(inline)] pub use frame_system; @@ -112,7 +116,7 @@ pub mod prelude { /// A test setup typically starts with: /// /// ``` -/// use frame::testing_prelude::*; +/// use polkadot_sdk_frame::testing_prelude::*; /// // rest of your test setup. /// ``` #[cfg(feature = "std")] @@ -141,7 +145,7 @@ pub mod runtime { /// A runtime typically starts with: /// /// ``` - /// use frame::{prelude::*, runtime::prelude::*}; + /// use polkadot_sdk_frame::{prelude::*, runtime::prelude::*}; /// ``` pub mod prelude { /// All of the types related to the FRAME runtime executive. 
@@ -186,7 +190,7 @@ pub mod runtime { /// A non-testing runtime should have this enabled, as such: /// /// ``` - /// use frame::runtime::{prelude::*, apis::{*,}}; + /// use polkadot_sdk_frame::runtime::{prelude::*, apis::{*,}}; /// ``` // TODO: This is because of wildcard imports, and it should be not needed once we can avoid // that. Imports like that are needed because we seem to need some unknown types in the macro @@ -330,8 +334,8 @@ pub mod derive { /// In most cases, hopefully the answer is yes. pub mod deps { // TODO: It would be great to somehow instruct RA to prefer *not* suggesting auto-imports from - // these. For example, we prefer `frame::derive::CloneNoBound` rather than - // `frame::deps::frame_support::CloneNoBound`. + // these. For example, we prefer `polkadot_sdk_frame::derive::CloneNoBound` rather than + // `polkadot_sdk_frame::deps::frame_support::CloneNoBound`. pub use frame_support; pub use frame_system; diff --git a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs index bc62c0509b0..f22be024d3f 100644 --- a/substrate/frame/support/procedural/src/lib.rs +++ b/substrate/frame/support/procedural/src/lib.rs @@ -665,9 +665,9 @@ pub fn storage_alias(attributes: TokenStream, input: TokenStream) -> TokenStream "{}::macro_magic", match generate_access_from_frame_or_crate("frame-support") { Ok(path) => Ok(path), - Err(_) => generate_access_from_frame_or_crate("frame"), + Err(_) => generate_access_from_frame_or_crate("polkadot-sdk-frame"), } - .expect("Failed to find either `frame-support` or `frame` in `Cargo.toml` dependencies.") + .expect("Failed to find either `frame-support` or `polkadot-sdk-frame` in `Cargo.toml` dependencies.") .to_token_stream() .to_string() ) @@ -1181,9 +1181,9 @@ pub fn pallet_section(attr: TokenStream, tokens: TokenStream) -> TokenStream { "{}::macro_magic", match generate_access_from_frame_or_crate("frame-support") { Ok(path) => Ok(path), - Err(_) => generate_access_from_frame_or_crate("frame"), + Err(_) => generate_access_from_frame_or_crate("polkadot-sdk-frame"), } - .expect("Failed to find either `frame-support` or `frame` in `Cargo.toml` dependencies.") + .expect("Failed to find either `frame-support` or `polkadot-sdk-frame` in `Cargo.toml` dependencies.") .to_token_stream() .to_string() ) diff --git a/substrate/frame/support/procedural/src/pallet/parse/config.rs b/substrate/frame/support/procedural/src/pallet/parse/config.rs index fbab92db196..406072df4b9 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/config.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/config.rs @@ -275,7 +275,8 @@ fn check_event_type( } /// Check that the path to `frame_system::Config` is valid, this is that the path is just -/// `frame_system::Config` or when using the `frame` crate it is `frame::xyz::frame_system::Config`. +/// `frame_system::Config` or when using the `frame` crate it is +/// `polkadot_sdk_frame::xyz::frame_system::Config`. fn has_expected_system_config(path: syn::Path, frame_system: &syn::Path) -> bool { // Check if `frame_system` is actually 'frame_system'. if path.segments.iter().all(|s| s.ident != "frame_system") { @@ -293,7 +294,7 @@ fn has_expected_system_config(path: syn::Path, frame_system: &syn::Path) -> bool // `frame` re-exports it as such. syn::parse2::(quote::quote!(frame_system)).expect("is a valid path; qed"), (_, _) => - // They are either both `frame_system` or both `frame::xyz::frame_system`. 
+ // They are either both `frame_system` or both `polkadot_sdk_frame::xyz::frame_system`. frame_system.clone(), }; @@ -516,14 +517,28 @@ mod tests { #[test] fn has_expected_system_config_works_with_frame() { + let path = syn::parse2::(quote::quote!(frame_system::Config)).unwrap(); + + let frame_system = + syn::parse2::(quote::quote!(polkadot_sdk_frame::deps::frame_system)) + .unwrap(); + assert!(has_expected_system_config(path.clone(), &frame_system)); + let frame_system = syn::parse2::(quote::quote!(frame::deps::frame_system)).unwrap(); - let path = syn::parse2::(quote::quote!(frame_system::Config)).unwrap(); assert!(has_expected_system_config(path, &frame_system)); } #[test] fn has_expected_system_config_works_with_frame_full_path() { + let frame_system = + syn::parse2::(quote::quote!(polkadot_sdk_frame::deps::frame_system)) + .unwrap(); + let path = + syn::parse2::(quote::quote!(polkadot_sdk_frame::deps::frame_system::Config)) + .unwrap(); + assert!(has_expected_system_config(path, &frame_system)); + let frame_system = syn::parse2::(quote::quote!(frame::deps::frame_system)).unwrap(); let path = @@ -533,6 +548,13 @@ mod tests { #[test] fn has_expected_system_config_works_with_other_frame_full_path() { + let frame_system = + syn::parse2::(quote::quote!(polkadot_sdk_frame::xyz::frame_system)).unwrap(); + let path = + syn::parse2::(quote::quote!(polkadot_sdk_frame::xyz::frame_system::Config)) + .unwrap(); + assert!(has_expected_system_config(path, &frame_system)); + let frame_system = syn::parse2::(quote::quote!(frame::xyz::frame_system)).unwrap(); let path = @@ -543,18 +565,21 @@ mod tests { #[test] fn has_expected_system_config_does_not_works_with_mixed_frame_full_path() { let frame_system = - syn::parse2::(quote::quote!(frame::xyz::frame_system)).unwrap(); + syn::parse2::(quote::quote!(polkadot_sdk_frame::xyz::frame_system)).unwrap(); let path = - syn::parse2::(quote::quote!(frame::deps::frame_system::Config)).unwrap(); + syn::parse2::(quote::quote!(polkadot_sdk_frame::deps::frame_system::Config)) + .unwrap(); assert!(!has_expected_system_config(path, &frame_system)); } #[test] fn has_expected_system_config_does_not_works_with_other_mixed_frame_full_path() { let frame_system = - syn::parse2::(quote::quote!(frame::deps::frame_system)).unwrap(); + syn::parse2::(quote::quote!(polkadot_sdk_frame::deps::frame_system)) + .unwrap(); let path = - syn::parse2::(quote::quote!(frame::xyz::frame_system::Config)).unwrap(); + syn::parse2::(quote::quote!(polkadot_sdk_frame::xyz::frame_system::Config)) + .unwrap(); assert!(!has_expected_system_config(path, &frame_system)); } @@ -562,7 +587,8 @@ mod tests { fn has_expected_system_config_does_not_work_with_frame_full_path_if_not_frame_crate() { let frame_system = syn::parse2::(quote::quote!(frame_system)).unwrap(); let path = - syn::parse2::(quote::quote!(frame::deps::frame_system::Config)).unwrap(); + syn::parse2::(quote::quote!(polkadot_sdk_frame::deps::frame_system::Config)) + .unwrap(); assert!(!has_expected_system_config(path, &frame_system)); } diff --git a/substrate/frame/support/procedural/tools/src/lib.rs b/substrate/frame/support/procedural/tools/src/lib.rs index be439613362..8952cd6011f 100644 --- a/substrate/frame/support/procedural/tools/src/lib.rs +++ b/substrate/frame/support/procedural/tools/src/lib.rs @@ -54,15 +54,19 @@ pub fn generate_crate_access(unique_id: &str, def_crate: &str) -> TokenStream { /// /// This will usually check the output of [`generate_access_from_frame_or_crate`]. 
/// We want to know if whatever the `path` takes us to, is exported from `frame` or not. In that -/// case `path` would start with `frame`, something like `frame::x::y:z`. +/// case `path` would start with `frame`, something like `polkadot_sdk_frame::x::y:z` or +/// frame::x::y:z. pub fn is_using_frame_crate(path: &syn::Path) -> bool { - path.segments.first().map(|s| s.ident == "frame").unwrap_or(false) + path.segments + .first() + .map(|s| s.ident == "polkadot_sdk_frame" || s.ident == "frame") + .unwrap_or(false) } /// Generate the crate access for the crate using 2018 syntax. /// -/// If `frame` is in scope, it will use `frame::deps::`. Else, it will try and find -/// `` directly. +/// If `frame` is in scope, it will use `polkadot_sdk_frame::deps::`. Else, it will try +/// and find `` directly. pub fn generate_access_from_frame_or_crate(def_crate: &str) -> Result { if let Some(path) = get_frame_crate_path(def_crate) { Ok(path) @@ -114,7 +118,9 @@ pub fn generate_hidden_includes(unique_id: &str, def_crate: &str) -> TokenStream /// Generates the path to the frame crate deps. fn get_frame_crate_path(def_crate: &str) -> Option { // This does not work if the frame crate is renamed. - if let Ok(FoundCrate::Name(name)) = crate_name(&"frame") { + if let Ok(FoundCrate::Name(name)) = + crate_name(&"polkadot-sdk-frame").or_else(|_| crate_name(&"frame")) + { let path = format!("{}::deps::{}", name, def_crate.to_string().replace("-", "_")); Some(syn::parse_str::(&path).expect("is a valid path; qed")) } else { diff --git a/substrate/frame/support/test/stg_frame_crate/Cargo.toml b/substrate/frame/support/test/stg_frame_crate/Cargo.toml index 295b2a1a524..554c81ab43d 100644 --- a/substrate/frame/support/test/stg_frame_crate/Cargo.toml +++ b/substrate/frame/support/test/stg_frame_crate/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -frame = { path = "../../..", default-features = false, features = ["experimental", "runtime"] } +frame = { package = "polkadot-sdk-frame", path = "../../..", default-features = false, features = ["experimental", "runtime"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } [features] diff --git a/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.rs b/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.rs index 573ceb6dfab..b510beb54dd 100644 --- a/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.rs +++ b/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.rs @@ -17,13 +17,13 @@ #[frame_support::pallet] mod pallet { - use frame::deps::frame_system::pallet_prelude::BlockNumberFor; + use polkadot_sdk_frame::deps::frame_system::pallet_prelude::BlockNumberFor; use frame_support::pallet_prelude::{Hooks, IsType}; #[pallet::config] - pub trait Config: frame::deps::frame_system::Config { + pub trait Config: polkadot_sdk_frame::deps::frame_system::Config { type Bar: Clone + std::fmt::Debug + Eq; - type RuntimeEvent: IsType<::RuntimeEvent> + type RuntimeEvent: IsType<::RuntimeEvent> + From>; } diff --git a/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.stderr b/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.stderr index 0f805c972e4..384e44d97a6 100644 --- 
diff --git a/substrate/frame/support/test/stg_frame_crate/Cargo.toml b/substrate/frame/support/test/stg_frame_crate/Cargo.toml
index 295b2a1a524..554c81ab43d 100644
--- a/substrate/frame/support/test/stg_frame_crate/Cargo.toml
+++ b/substrate/frame/support/test/stg_frame_crate/Cargo.toml
@@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
-frame = { path = "../../..", default-features = false, features = ["experimental", "runtime"] }
+frame = { package = "polkadot-sdk-frame", path = "../../..", default-features = false, features = ["experimental", "runtime"] }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 
 [features]
diff --git a/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.rs b/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.rs
index 573ceb6dfab..b510beb54dd 100644
--- a/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.rs
+++ b/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.rs
@@ -17,13 +17,13 @@
 
 #[frame_support::pallet]
 mod pallet {
-    use frame::deps::frame_system::pallet_prelude::BlockNumberFor;
+    use polkadot_sdk_frame::deps::frame_system::pallet_prelude::BlockNumberFor;
     use frame_support::pallet_prelude::{Hooks, IsType};
 
     #[pallet::config]
-    pub trait Config: frame::deps::frame_system::Config {
+    pub trait Config: polkadot_sdk_frame::deps::frame_system::Config {
         type Bar: Clone + std::fmt::Debug + Eq;
-        type RuntimeEvent: IsType<<Self as frame::deps::frame_system::Config>::RuntimeEvent>
+        type RuntimeEvent: IsType<<Self as polkadot_sdk_frame::deps::frame_system::Config>::RuntimeEvent>
             + From<Event<Self>>;
     }
 
diff --git a/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.stderr b/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.stderr
index 0f805c972e4..384e44d97a6 100644
--- a/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.stderr
+++ b/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.stderr
@@ -1,5 +1,5 @@
 error: Invalid `type RuntimeEvent`, associated type `RuntimeEvent` is reserved and must bound: `IsType<<Self as frame_system::Config>::RuntimeEvent>`
   --> tests/pallet_ui/event_type_invalid_bound_no_frame_crate.rs:26:3
    |
-26 |         type RuntimeEvent: IsType<<Self as frame::deps::frame_system::Config>::RuntimeEvent>
+26 |         type RuntimeEvent: IsType<<Self as polkadot_sdk_frame::deps::frame_system::Config>::RuntimeEvent>
    |              ^^^^
diff --git a/substrate/primitives/api/proc-macro/src/utils.rs b/substrate/primitives/api/proc-macro/src/utils.rs
index c8c1f12d90a..a6570a98f1f 100644
--- a/substrate/primitives/api/proc-macro/src/utils.rs
+++ b/substrate/primitives/api/proc-macro/src/utils.rs
@@ -34,7 +34,9 @@ pub fn generate_crate_access() -> TokenStream {
             quote!(#renamed_name::__private)
         },
         Err(e) =>
-            if let Ok(FoundCrate::Name(name)) = crate_name(&"frame") {
+            if let Ok(FoundCrate::Name(name)) =
+                crate_name(&"polkadot-sdk-frame").or_else(|_| crate_name(&"frame"))
+            {
                 let path = format!("{}::deps::sp_api::__private", name);
                 let path = syn::parse_str::<syn::Path>(&path).expect("is a valid path; qed");
                 quote!( #path )
diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs
index 80a5d27d8c2..5fbfc0530bb 100644
--- a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs
+++ b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs
@@ -48,7 +48,7 @@ use std::{
 };
 
 /// Logging target
-const LOG_TARGET: &'static str = "frame::benchmark::pallet";
+const LOG_TARGET: &'static str = "polkadot_sdk_frame::benchmark::pallet";
 
 /// The inclusive range of a component.
 #[derive(Serialize, Debug, Clone, Eq, PartialEq)]
diff --git a/substrate/utils/frame/rpc/support/src/lib.rs b/substrate/utils/frame/rpc/support/src/lib.rs
index a839bbc3402..dea822167ff 100644
--- a/substrate/utils/frame/rpc/support/src/lib.rs
+++ b/substrate/utils/frame/rpc/support/src/lib.rs
@@ -161,7 +161,7 @@ impl StorageQuery {
     /// Send this query over RPC, await the typed result.
     ///
-    /// Hash should be `::Hash`.
+    /// Hash should be `::Hash`.
     ///
     /// # Arguments
     ///
diff --git a/templates/minimal/node/Cargo.toml b/templates/minimal/node/Cargo.toml
index 0668304e502..606fd058035 100644
--- a/templates/minimal/node/Cargo.toml
+++ b/templates/minimal/node/Cargo.toml
@@ -49,7 +49,7 @@ substrate-frame-rpc-system = { path = "../../../substrate/utils/frame/rpc/system
 
 # Once the native runtime is gone, there should be little to no dependency on FRAME here, and
 # certainly no dependency on the runtime.
-frame = { path = "../../../substrate/frame", features = [
+frame = { package = "polkadot-sdk-frame", path = "../../../substrate/frame", features = [
     "experimental",
     "runtime",
 ] }
diff --git a/templates/minimal/pallets/template/Cargo.toml b/templates/minimal/pallets/template/Cargo.toml
index a85391f2942..909ba034454 100644
--- a/templates/minimal/pallets/template/Cargo.toml
+++ b/templates/minimal/pallets/template/Cargo.toml
@@ -22,7 +22,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", features = [
 scale-info = { version = "2.11.1", default-features = false, features = [
     "derive",
 ] }
-frame = { path = "../../../../substrate/frame", default-features = false, features = [
+frame = { package = "polkadot-sdk-frame", path = "../../../../substrate/frame", default-features = false, features = [
     "experimental",
     "runtime",
 ] }
diff --git a/templates/minimal/runtime/Cargo.toml b/templates/minimal/runtime/Cargo.toml
index a99a1e43f85..e7f88ca47af 100644
--- a/templates/minimal/runtime/Cargo.toml
+++ b/templates/minimal/runtime/Cargo.toml
@@ -17,7 +17,7 @@ parity-scale-codec = { version = "3.0.0", default-features = false }
 scale-info = { version = "2.6.0", default-features = false }
 
 # this is a frame-based runtime, thus importing `frame` with runtime feature enabled.
-frame = { path = "../../../substrate/frame", default-features = false, features = [
+frame = { package = "polkadot-sdk-frame", path = "../../../substrate/frame", default-features = false, features = [
     "experimental",
     "runtime",
 ] }
-- 
GitLab

From ebdca15c19cf5f4f43c9fde91d77b9dc8c272281 Mon Sep 17 00:00:00 2001
From: Vladimir Istyufeev
Date: Thu, 4 Apr 2024 12:22:18 +0400
Subject: [PATCH 096/128] Convince GitLab not to crop collapsed multiline strings (#3971)

Use of `- >` instead of `- |` works around a GitLab quirk where it crops
collapsed multiline `script:` section commands in its CI job logs. This PR
also fixes `- |` based `script:` steps so that they behave properly after
the `- >` conversion.

Resolves https://github.com/paritytech/ci_cd/issues/972.
---
 .gitlab-ci.yml               |  6 +++---
 .gitlab/pipeline/build.yml   | 28 ++++++++++++++--------------
 .gitlab/pipeline/check.yml   | 29 +++++++++++++----------------
 .gitlab/pipeline/publish.yml | 14 +++++++-------
 .gitlab/pipeline/test.yml    |  6 +++---
 5 files changed, 40 insertions(+), 43 deletions(-)

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 93a6ccb9f8f..fbcd94b5256 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -125,10 +125,10 @@ default:
     - cp $FL_FORKLIFT_CONFIG ~/.forklift/config.toml
     - shopt -s expand_aliases
    - export PATH=$PATH:$(pwd)
-    - |
+    - >
       if [ "$FORKLIFT_BYPASS" != "true" ]; then
-        echo "FORKLIFT_BYPASS not set, creating alias cargo='forklift cargo'"
-        alias cargo="forklift cargo"
+        echo "FORKLIFT_BYPASS not set, creating alias cargo='forklift cargo'";
+        alias cargo="forklift cargo";
       fi
     # - echo "FL_FORKLIFT_VERSION ${FL_FORKLIFT_VERSION}"
diff --git a/.gitlab/pipeline/build.yml b/.gitlab/pipeline/build.yml
index 44d66eb2f5e..3c870e576a8 100644
--- a/.gitlab/pipeline/build.yml
+++ b/.gitlab/pipeline/build.yml
@@ -105,26 +105,26 @@ build-rustdoc:
     - mv ./target/doc ./crate-docs
     # Inject Simple Analytics (https://www.simpleanalytics.com/) privacy preserving tracker into
     # all .html files
-    - |
+    - >
       inject_simple_analytics() {
-        local path="$1"
-        local script_content=""
+        local path="$1";
+        local script_content="";
         # Function that inject script into the head of an html file using sed.
         process_file() {
-          local file="$1"
-          echo "Adding Simple Analytics script to $file"
-          sed -i "s||$script_content|" "$file"
-        }
-        export -f process_file
-        # xargs runs process_file in seperate shells without access to outer variables.
-        # to make script_content available inside process_file, export it as an env var here.
-        export script_content
+          local file="$1";
+          echo "Adding Simple Analytics script to $file";
+          sed -i "s||$script_content|" "$file";
+        };
+        export -f process_file;
+        # xargs runs process_file in separate shells without access to outer variables.
+        # make script_content available inside process_file, export it as an env var here.
+        export script_content;
         # Modify .html files in parallel using xargs, otherwise it can take a long time.
-        find "$path" -name '*.html' | xargs -I {} -P "$(nproc)" bash -c 'process_file "$@"' _ {}
-      }
-      inject_simple_analytics "./crate-docs"
+        find "$path" -name '*.html' | xargs -I {} -P "$(nproc)" bash -c 'process_file "$@"' _ {};
+      };
+      inject_simple_analytics "./crate-docs";
     - echo "" > ./crate-docs/index.html
 
 build-implementers-guide:
diff --git a/.gitlab/pipeline/check.yml b/.gitlab/pipeline/check.yml
index 52da3355050..4c39539f1e5 100644
--- a/.gitlab/pipeline/check.yml
+++ b/.gitlab/pipeline/check.yml
@@ -104,23 +104,20 @@ check-toml-format:
     - .docker-env
     - .test-pr-refs
   script:
     - export RUST_LOG=remote-ext=debug,runtime=debug
    - echo "---------- Downloading try-runtime CLI ----------"
     - curl -sL https://github.com/paritytech/try-runtime-cli/releases/download/v0.5.4/try-runtime-x86_64-unknown-linux-musl -o try-runtime
     - chmod +x ./try-runtime
     - echo "Using try-runtime-cli version:"
     - ./try-runtime --version
     - echo "---------- Building ${PACKAGE} runtime ----------"
     - time cargo build --release --locked -p "$PACKAGE" --features try-runtime
     - echo "---------- Executing on-runtime-upgrade for ${NETWORK} ----------"
+    - >
       time ./try-runtime ${COMMAND_EXTRA_ARGS} \
       --runtime ./target/release/wbuild/"$PACKAGE"/"$WASM" \
       on-runtime-upgrade --disable-spec-version-check --checks=all ${SUBCOMMAND_EXTRA_ARGS} live --uri ${URI}
     - sleep 5
 
 # Check runtime migrations for Parity managed relay chains
 check-runtime-migration-westend:
diff --git a/.gitlab/pipeline/publish.yml b/.gitlab/pipeline/publish.yml
index a37ba012a8a..954df10bef0 100644
--- a/.gitlab/pipeline/publish.yml
+++ b/.gitlab/pipeline/publish.yml
@@ -113,17 +113,17 @@ trigger_workflow:
       artifacts: true
   script:
     - echo "Triggering workflow"
-    - |
+    - >
       for benchmark in $(ls charts/*.json); do
-        export bencmark_name=$(basename $benchmark)
-        echo "Benchmark: $bencmark_name"
-        export benchmark_dir=$(echo $bencmark_name | sed 's/\.json//')
+        export benchmark_name=$(basename $benchmark);
+        echo "Benchmark: $benchmark_name";
+        export benchmark_dir=$(echo $benchmark_name | sed 's/\.json//');
         curl -q -X POST \
         -H "Accept: application/vnd.github.v3+json" \
         -H "Authorization: token $GITHUB_TOKEN" \
-        https://api.github.com/repos/paritytech/${CI_PROJECT_NAME}/actions/workflows/subsystem-benchmarks.yml/dispatches \
-        -d '{"ref":"refs/heads/master","inputs":{"benchmark-data-dir-path":"'$benchmark_dir'","output-file-path":"'$bencmark_name'"}}'
-        sleep 300
+        https://api.github.com/repos/paritytech/${CI_PROJECT_NAME}/actions/workflows/subsystem-benchmarks.yml/dispatches \
+        -d "{\"ref\":\"refs/heads/master\",\"inputs\":{\"benchmark-data-dir-path\":\"$benchmark_dir\",\"output-file-path\":\"$benchmark_name\"}}";
+        sleep 300;
       done
   allow_failure: true
diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml
index 48c84b472b4..9db89b92300 100644
--- a/.gitlab/pipeline/test.yml
+++ b/.gitlab/pipeline/test.yml
@@ -23,7 +23,7 @@ test-linux-stable:
     - echo "Node index - ${CI_NODE_INDEX}. Total amount - ${CI_NODE_TOTAL}"
     # add experimental to features after https://github.com/paritytech/substrate/pull/14502 is merged
     # "upgrade_version_checks_should_work" is currently failing
-    - |
+    - >
       time cargo nextest run \
       --workspace \
       --locked \
@@ -34,7 +34,7 @@ test-linux-stable:
     # Upload tests results to Elasticsearch
     - echo "Upload test results to Elasticsearch"
     - cat target/nextest/default/junit.xml | xq . > target/nextest/default/junit.json
-    - |
+    - >
       curl -v -XPOST --http1.1 \
       -u ${ELASTIC_USERNAME}:${ELASTIC_PASSWORD} \
       https://elasticsearch.parity-build.parity.io/unit-tests/_doc/${CI_JOB_ID} \
@@ -87,7 +87,7 @@ test-linux-stable-runtime-benchmarks:
 #   script:
 #     # Build all but only execute 'runtime' tests.
 #     - echo "Node index - ${CI_NODE_INDEX}. Total amount - ${CI_NODE_TOTAL}"
-#     - |
+#     - >
 #       time cargo nextest run \
 #       --workspace \
 #       --locked \
-- 
GitLab

From 0bbda78d86bc6210cda123042d817aeaf45b3d77 Mon Sep 17 00:00:00 2001
From: Lulu
Date: Thu, 4 Apr 2024 10:26:53 +0100
Subject: [PATCH 097/128] Use 0.1.0 as minimum version for crates (#3941)

CI will be enforcing this with the next parity-publish release.
---
 Cargo.lock                                                | 8 ++++----
 cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml | 2 +-
 substrate/frame/parameters/Cargo.toml                     | 2 +-
 substrate/primitives/crypto/hashing/Cargo.toml            | 2 +-
 substrate/primitives/crypto/hashing/proc-macro/Cargo.toml | 2 +-
 5 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index fc340ff1119..4b583c1c6a9 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1922,7 +1922,7 @@ dependencies = [
 
 [[package]]
 name = "bridge-hub-common"
-version = "0.0.0"
+version = "0.1.0"
 dependencies = [
  "cumulus-primitives-core",
  "frame-support",
@@ -10469,7 +10469,7 @@ dependencies = [
 
 [[package]]
 name = "pallet-parameters"
-version = "0.0.1"
+version = "0.1.0"
 dependencies = [
  "docify",
  "frame-benchmarking",
@@ -18728,7 +18728,7 @@ dependencies = [
 
 [[package]]
 name = "sp-crypto-hashing"
-version = "0.0.0"
+version = "0.1.0"
 dependencies = [
  "blake2b_simd",
  "byteorder",
@@ -18742,7 +18742,7 @@ dependencies = [
 
 [[package]]
 name = "sp-crypto-hashing-proc-macro"
-version = "0.0.0"
+version = "0.1.0"
 dependencies = [
  "quote",
  "sp-crypto-hashing",
diff --git a/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml
index 2ab6ee7995f..2f5f783ce48 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml
+++ b/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "bridge-hub-common"
-version = "0.0.0"
+version = "0.1.0" authors.workspace = true edition.workspace = true description = "Bridge hub common utilities" diff --git a/substrate/frame/parameters/Cargo.toml b/substrate/frame/parameters/Cargo.toml index 2527bdf3a71..b718b391019 100644 --- a/substrate/frame/parameters/Cargo.toml +++ b/substrate/frame/parameters/Cargo.toml @@ -3,7 +3,7 @@ name = "pallet-parameters" description = "Pallet to store and configure parameters." repository.workspace = true license = "Apache-2.0" -version = "0.0.1" +version = "0.1.0" authors = ["Acala Developers", "Parity Technologies "] edition.workspace = true diff --git a/substrate/primitives/crypto/hashing/Cargo.toml b/substrate/primitives/crypto/hashing/Cargo.toml index 3077e1e715e..096650e231c 100644 --- a/substrate/primitives/crypto/hashing/Cargo.toml +++ b/substrate/primitives/crypto/hashing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-crypto-hashing" -version = "0.0.0" +version = "0.1.0" authors.workspace = true edition.workspace = true license = "Apache-2.0" diff --git a/substrate/primitives/crypto/hashing/proc-macro/Cargo.toml b/substrate/primitives/crypto/hashing/proc-macro/Cargo.toml index f244b02ca10..f988042d307 100644 --- a/substrate/primitives/crypto/hashing/proc-macro/Cargo.toml +++ b/substrate/primitives/crypto/hashing/proc-macro/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-crypto-hashing-proc-macro" -version = "0.0.0" +version = "0.1.0" authors.workspace = true edition.workspace = true license = "Apache-2.0" -- GitLab From 0ef37c75401b78b61ed35ce27af8b964da27bb3c Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Thu, 4 Apr 2024 22:32:01 +1100 Subject: [PATCH 098/128] Fix Mermaid diagram rendering (#3875) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes https://github.com/paritytech/polkadot-sdk/issues/2977 The issue appears to stem from the `aquamarine` crate failing to render diagrams in re-exported crates. e.g. as raised [here](https://github.com/paritytech/polkadot-sdk/issues/2977), diagrams would render at `frame_support::traits::Hooks` but not the re-exported doc `frame::traits::Hooks`, even if I added `aquamarine` as a `frame` crate dependency. To resolve this, I followed advice in https://github.com/mersinvald/aquamarine/issues/20 to instead render mermaid diagrams directly using JS by adding an `after-content.js`. --- Also fixes compile warnings, enables `--all-features` and disallows future warnings in CI. 
---------

Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com>
Co-authored-by: Bastian Köcher
---
 .gitlab/pipeline/build.yml               |  5 ++---
 cumulus/test/service/src/chain_spec.rs   |  2 +-
 docs/sdk/assets/after-content.html       |  2 ++
 docs/sdk/{headers => assets}/header.html |  2 ++
 docs/sdk/{headers => assets}/theme.css   |  0
 prdoc/pr_3875.prdoc                      | 11 +++++++++++
 substrate/frame/assets/src/lib.rs        |  3 +--
 7 files changed, 19 insertions(+), 6 deletions(-)
 create mode 100644 docs/sdk/assets/after-content.html
 rename docs/sdk/{headers => assets}/header.html (97%)
 rename docs/sdk/{headers => assets}/theme.css (100%)
 create mode 100644 prdoc/pr_3875.prdoc

diff --git a/.gitlab/pipeline/build.yml b/.gitlab/pipeline/build.yml
index 3c870e576a8..8658e92efc8 100644
--- a/.gitlab/pipeline/build.yml
+++ b/.gitlab/pipeline/build.yml
@@ -91,7 +91,7 @@ build-rustdoc:
     - .run-immediately
   variables:
     SKIP_WASM_BUILD: 1
-    RUSTDOCFLAGS: "--default-theme=ayu --html-in-header ./docs/sdk/headers/header.html --extend-css ./docs/sdk/headers/theme.css"
+    RUSTDOCFLAGS: "-Dwarnings --default-theme=ayu --html-in-header ./docs/sdk/assets/header.html --extend-css ./docs/sdk/assets/theme.css --html-after-content ./docs/sdk/assets/after-content.html"
   artifacts:
     name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}-doc"
     when: on_success
@@ -99,8 +99,7 @@ build-rustdoc:
     paths:
       - ./crate-docs/
   script:
-    # FIXME: it fails with `RUSTDOCFLAGS="-Dwarnings"` and `--all-features`
-    - time cargo doc --features try-runtime,experimental --workspace --no-deps
+    - time cargo doc --all-features --workspace --no-deps
     - rm -f ./target/doc/.lock
     - mv ./target/doc ./crate-docs
diff --git a/cumulus/test/service/src/chain_spec.rs b/cumulus/test/service/src/chain_spec.rs
index 61bbf755d89..e86023576ac 100644
--- a/cumulus/test/service/src/chain_spec.rs
+++ b/cumulus/test/service/src/chain_spec.rs
@@ -34,7 +34,7 @@ pub fn get_from_seed<TPublic: Public>(seed: &str) -> <TPublic::Pair as Pair>::Pu
         .public()
 }
 
-/// The extensions for the [`ChainSpec`](crate::ChainSpec).
+/// The extensions for the [`ChainSpec`].
 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ChainSpecGroup, ChainSpecExtension)]
 #[serde(deny_unknown_fields)]
 pub struct Extensions {
diff --git a/docs/sdk/assets/after-content.html b/docs/sdk/assets/after-content.html
new file mode 100644
index 00000000000..30ae5c7ec43
--- /dev/null
+++ b/docs/sdk/assets/after-content.html
@@ -0,0 +1,2 @@
+
+
diff --git a/docs/sdk/headers/header.html b/docs/sdk/assets/header.html
similarity index 97%
rename from docs/sdk/headers/header.html
rename to docs/sdk/assets/header.html
index e28458c4ccc..f55c31b5321 100644
--- a/docs/sdk/headers/header.html
+++ b/docs/sdk/assets/header.html
@@ -84,6 +84,8 @@
   });
 
+
+