From 3f0383a5b8b6861c0ee0c64a17bed78c07956ba5 Mon Sep 17 00:00:00 2001 From: Vincent Geddes Date: Fri, 10 Nov 2023 15:12:47 +0200 Subject: [PATCH 01/74] [pallet-message-queue] Implement impl_trait_for_tuples for QueuePausedQuery (#2227) These changes are required so that the bridgehub system runtimes can more easily be configured with multiple message processors Example usage: ```rust use frame_support::traits::QueuePausedQuery; impl pallet_message_queue::Config for Runtime { type QueuePausedQuery = (A, B, C) } --- substrate/frame/support/src/traits/messages.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/substrate/frame/support/src/traits/messages.rs b/substrate/frame/support/src/traits/messages.rs index 654380dbb0a..58815b107c8 100644 --- a/substrate/frame/support/src/traits/messages.rs +++ b/substrate/frame/support/src/traits/messages.rs @@ -240,8 +240,14 @@ pub trait QueuePausedQuery { fn is_paused(origin: &Origin) -> bool; } -impl QueuePausedQuery for () { - fn is_paused(_: &Origin) -> bool { +#[impl_trait_for_tuples::impl_for_tuples(8)] +impl QueuePausedQuery for Tuple { + fn is_paused(origin: &Origin) -> bool { + for_tuples!( #( + if Tuple::is_paused(origin) { + return true; + } + )* ); false } } -- GitLab From 84ddbaf68445fed23a88a086ee37bdcdbcb7356a Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Fri, 10 Nov 2023 17:14:05 +0400 Subject: [PATCH 02/74] Improve `VersionedMigration` naming conventions (#2264) As suggested by @ggwpez (https://github.com/paritytech/polkadot-sdk/pull/2142#discussion_r1388145872), remove the `VersionChecked` prefix from version checked migrations (but leave `VersionUnchecked` prefixes) --------- Co-authored-by: command-bot <> --- .../common/src/assigned_slots/migration.rs | 10 +++++----- .../common/src/paras_registrar/migration.rs | 15 +++++++-------- polkadot/runtime/rococo/src/lib.rs | 6 +++--- polkadot/runtime/westend/src/lib.rs | 4 ++-- polkadot/xcm/pallet-xcm/src/migration.rs | 4 ++-- substrate/frame/society/src/migrations.rs | 15 +++++++-------- substrate/frame/support/src/migrations.rs | 4 ++-- 7 files changed, 28 insertions(+), 30 deletions(-) diff --git a/polkadot/runtime/common/src/assigned_slots/migration.rs b/polkadot/runtime/common/src/assigned_slots/migration.rs index 0e88b27a1ff..ba3108c0aa3 100644 --- a/polkadot/runtime/common/src/assigned_slots/migration.rs +++ b/polkadot/runtime/common/src/assigned_slots/migration.rs @@ -25,8 +25,8 @@ use sp_std::vec::Vec; pub mod v1 { use super::*; - pub struct MigrateToV1(sp_std::marker::PhantomData); - impl OnRuntimeUpgrade for MigrateToV1 { + pub struct VersionUncheckedMigrateToV1(sp_std::marker::PhantomData); + impl OnRuntimeUpgrade for VersionUncheckedMigrateToV1 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { let onchain_version = Pallet::::on_chain_storage_version(); @@ -60,13 +60,13 @@ pub mod v1 { } } - /// [`MigrateToV1`] wrapped in a + /// [`VersionUncheckedMigrateToV1`] wrapped in a /// [`VersionedMigration`](frame_support::migrations::VersionedMigration), ensuring the /// migration is only performed when on-chain version is 0. 
- pub type VersionCheckedMigrateToV1 = frame_support::migrations::VersionedMigration< + pub type MigrateToV1 = frame_support::migrations::VersionedMigration< 0, 1, - MigrateToV1, + VersionUncheckedMigrateToV1, Pallet, ::DbWeight, >; diff --git a/polkadot/runtime/common/src/paras_registrar/migration.rs b/polkadot/runtime/common/src/paras_registrar/migration.rs index b767985489d..f977674a1e4 100644 --- a/polkadot/runtime/common/src/paras_registrar/migration.rs +++ b/polkadot/runtime/common/src/paras_registrar/migration.rs @@ -60,11 +60,10 @@ impl> OnRuntimeUpgrade } } -pub type VersionCheckedMigrateToV1 = - frame_support::migrations::VersionedMigration< - 0, - 1, - VersionUncheckedMigrateToV1, - super::Pallet, - ::DbWeight, - >; +pub type MigrateToV1 = frame_support::migrations::VersionedMigration< + 0, + 1, + VersionUncheckedMigrateToV1, + super::Pallet, + ::DbWeight, +>; diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 4f542354346..697d22c311a 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -1472,14 +1472,14 @@ pub mod migrations { /// Unreleased migrations. Add new ones here: pub type Unreleased = ( - pallet_society::migrations::VersionCheckedMigrateToV2, + pallet_society::migrations::MigrateToV2, pallet_im_online::migration::v1::Migration, parachains_configuration::migration::v7::MigrateToV7, - assigned_slots::migration::v1::VersionCheckedMigrateToV1, + assigned_slots::migration::v1::MigrateToV1, parachains_scheduler::migration::v1::MigrateToV1, parachains_configuration::migration::v8::MigrateToV8, parachains_configuration::migration::v9::MigrateToV9, - paras_registrar::migration::VersionCheckedMigrateToV1, + paras_registrar::migration::MigrateToV1, pallet_referenda::migration::v1::MigrateV0ToV1, pallet_referenda::migration::v1::MigrateV0ToV1, diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index ec94973af4f..fe9ed22f437 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1549,12 +1549,12 @@ pub mod migrations { pallet_im_online::migration::v1::Migration, parachains_configuration::migration::v7::MigrateToV7, pallet_staking::migrations::v14::MigrateToV14, - assigned_slots::migration::v1::VersionCheckedMigrateToV1, + assigned_slots::migration::v1::MigrateToV1, parachains_scheduler::migration::v1::MigrateToV1, parachains_configuration::migration::v8::MigrateToV8, UpgradeSessionKeys, parachains_configuration::migration::v9::MigrateToV9, - paras_registrar::migration::VersionCheckedMigrateToV1, + paras_registrar::migration::MigrateToV1, pallet_nomination_pools::migration::versioned_migrations::V5toV6, pallet_referenda::migration::v1::MigrateV0ToV1, pallet_nomination_pools::migration::versioned_migrations::V6ToV7, diff --git a/polkadot/xcm/pallet-xcm/src/migration.rs b/polkadot/xcm/pallet-xcm/src/migration.rs index ba3cdb5c51e..2793afcc910 100644 --- a/polkadot/xcm/pallet-xcm/src/migration.rs +++ b/polkadot/xcm/pallet-xcm/src/migration.rs @@ -31,7 +31,7 @@ pub mod v1 { /// checking, the version checking is not complete as it will begin failing after the upgrade is /// enacted on-chain. /// - /// Use experimental [`VersionCheckedMigrateToV1`] instead. + /// Use experimental [`MigrateToV1`] instead. 
pub struct VersionUncheckedMigrateToV1(sp_std::marker::PhantomData); impl OnRuntimeUpgrade for VersionUncheckedMigrateToV1 { fn on_runtime_upgrade() -> Weight { @@ -65,7 +65,7 @@ pub mod v1 { /// /// Wrapped in [`frame_support::migrations::VersionedMigration`] so the pre/post checks don't /// begin failing after the upgrade is enacted on-chain. - pub type VersionCheckedMigrateToV1 = frame_support::migrations::VersionedMigration< + pub type MigrateToV1 = frame_support::migrations::VersionedMigration< 0, 1, VersionUncheckedMigrateToV1, diff --git a/substrate/frame/society/src/migrations.rs b/substrate/frame/society/src/migrations.rs index 553eea1a795..a995c9d7be7 100644 --- a/substrate/frame/society/src/migrations.rs +++ b/substrate/frame/society/src/migrations.rs @@ -95,14 +95,13 @@ impl< /// [`VersionUncheckedMigrateToV2`] wrapped in a [`frame_support::migrations::VersionedMigration`], /// ensuring the migration is only performed when on-chain version is 0. -pub type VersionCheckedMigrateToV2 = - frame_support::migrations::VersionedMigration< - 0, - 2, - VersionUncheckedMigrateToV2, - crate::pallet::Pallet, - ::DbWeight, - >; +pub type MigrateToV2 = frame_support::migrations::VersionedMigration< + 0, + 2, + VersionUncheckedMigrateToV2, + crate::pallet::Pallet, + ::DbWeight, +>; pub(crate) mod old { use super::*; diff --git a/substrate/frame/support/src/migrations.rs b/substrate/frame/support/src/migrations.rs index 22471e883a7..a9eb460421f 100644 --- a/substrate/frame/support/src/migrations.rs +++ b/substrate/frame/support/src/migrations.rs @@ -51,7 +51,7 @@ use sp_std::marker::PhantomData; /// // OnRuntimeUpgrade implementation... /// } /// -/// pub type VersionCheckedMigrateV5ToV6 = +/// pub type MigrateV5ToV6 = /// VersionedMigration< /// 5, /// 6, @@ -63,7 +63,7 @@ use sp_std::marker::PhantomData; /// // Migrations tuple to pass to the Executive pallet: /// pub type Migrations = ( /// // other migrations... -/// VersionCheckedMigrateV5ToV6, +/// MigrateV5ToV6, /// // other migrations... /// ); /// ``` -- GitLab From 6b7be115fd523057d01cea46e9680a62a0b08d97 Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Fri, 10 Nov 2023 16:38:24 +0100 Subject: [PATCH 03/74] Contracts: Add XCM traits to interface with contracts (#2086) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We are introducing a new set of `XcmController` traits (final name yet to be determined). These traits are implemented by `pallet-xcm` and allows other pallets, such as `pallet_contracts`, to rely on these traits instead of tight coupling them to `pallet-xcm`. Using only the existing Xcm traits would mean duplicating the logic from `pallet-xcm` in these other pallets, which we aim to avoid. Our objective is to ensure that when these APIs are called from `pallet-contracts`, they produce the exact same outcomes as if called directly from `pallet-xcm`. The other benefits is that we can also expose return values to `pallet-contracts` instead of just calling `pallet-xcm` dispatchable and getting a `DispatchResult` back. See traits integration in this PR https://github.com/paritytech/polkadot-sdk/pull/1248, where the traits are used as follow to define and implement `pallet-contracts` Config. ```rs // Contracts config: pub trait Config: frame_system::Config { // ... /// A type that exposes XCM APIs, allowing contracts to interact with other parachains, and /// execute XCM programs. 
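	// Reading of the bound below (assumed from this PR's description, not verified
	// against the final trait definition): `Controller` is expected to bundle the
	// execute/send/query capabilities that `pallet-xcm` implements, and its three
	// type parameters are the dispatch origin, the runtime call type, and the block
	// number type used as a query timeout.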
type Xcm: xcm_executor::traits::Controller< OriginFor, ::RuntimeCall, BlockNumberFor, >; } // implementation impl pallet_contracts::Config for Runtime { // ... type Xcm = pallet_xcm::Pallet; } ``` --------- Co-authored-by: Alexander Theißen Co-authored-by: command-bot <> --- .../src/weights/pallet_xcm.rs | 187 ++++++---- .../src/weights/pallet_xcm.rs | 163 +++++---- .../src/weights/pallet_xcm.rs | 182 ++++++---- .../src/weights/pallet_xcm.rs | 161 +++++---- .../src/weights/pallet_xcm.rs | 151 +++++--- .../src/weights/pallet_xcm.rs | 159 +++++---- .../src/weights/pallet_xcm.rs | 163 +++++---- .../src/weights/pallet_xcm.rs | 27 ++ .../src/weights/pallet_xcm.rs | 215 ++++++----- .../runtime/rococo/src/weights/pallet_xcm.rs | 329 ++++++++--------- .../runtime/westend/src/weights/pallet_xcm.rs | 333 +++++++++--------- polkadot/xcm/pallet-xcm/Cargo.toml | 3 +- polkadot/xcm/pallet-xcm/src/benchmarking.rs | 27 ++ polkadot/xcm/pallet-xcm/src/lib.rs | 157 +++++++-- polkadot/xcm/xcm-builder/src/controller.rs | 187 ++++++++++ polkadot/xcm/xcm-builder/src/lib.rs | 6 + polkadot/xcm/xcm-builder/src/tests/mock.rs | 2 +- .../xcm-executor/src/traits/on_response.rs | 46 ++- prdoc/pr_2086.prdoc | 15 + 19 files changed, 1602 insertions(+), 911 deletions(-) create mode 100644 polkadot/xcm/xcm-builder/src/controller.rs create mode 100644 prdoc/pr_2086.prdoc diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_xcm.rs index becfca7a891..1e4a723e10f 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_xcm.rs @@ -1,42 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=asset-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/ +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=asset-hub-kusama-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -49,6 +48,8 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -61,12 +62,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 30_015_000 picoseconds. - Weight::from_parts(30_576_000, 0) - .saturating_add(Weight::from_parts(0, 3574)) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 25_043_000 picoseconds. + Weight::from_parts(25_670_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) @@ -75,8 +76,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1489` - // Minimum execution time: 24_785_000 picoseconds. - Weight::from_parts(25_097_000, 0) + // Minimum execution time: 18_893_000 picoseconds. + Weight::from_parts(19_261_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -86,8 +87,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1489` - // Minimum execution time: 18_561_000 picoseconds. - Weight::from_parts(19_121_000, 0) + // Minimum execution time: 14_107_000 picoseconds. + Weight::from_parts(14_500_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -107,8 +108,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_298_000 picoseconds. - Weight::from_parts(9_721_000, 0) + // Minimum execution time: 7_175_000 picoseconds. 
+ Weight::from_parts(7_493_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -118,8 +119,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_912_000 picoseconds. - Weight::from_parts(3_262_000, 0) + // Minimum execution time: 2_162_000 picoseconds. + Weight::from_parts(2_278_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -127,6 +128,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -141,16 +144,18 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_subscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 35_127_000 picoseconds. - Weight::from_parts(36_317_000, 0) - .saturating_add(Weight::from_parts(0, 3574)) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 29_144_000 picoseconds. + Weight::from_parts(30_134_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -165,12 +170,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `326` - // Estimated: `3791` - // Minimum execution time: 36_634_000 picoseconds. - Weight::from_parts(37_983_000, 0) - .saturating_add(Weight::from_parts(0, 3791)) - .saturating_add(T::DbWeight::get().reads(6)) + // Measured: `363` + // Estimated: `3828` + // Minimum execution time: 31_522_000 picoseconds. + Weight::from_parts(32_679_000, 0) + .saturating_add(Weight::from_parts(0, 3828)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) @@ -179,8 +184,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_940_000 picoseconds. 
- Weight::from_parts(3_085_000, 0) + // Minimum execution time: 2_338_000 picoseconds. + Weight::from_parts(2_494_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -190,8 +195,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `162` // Estimated: `11052` - // Minimum execution time: 17_400_000 picoseconds. - Weight::from_parts(17_759_000, 0) + // Minimum execution time: 17_315_000 picoseconds. + Weight::from_parts(17_787_000, 0) .saturating_add(Weight::from_parts(0, 11052)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -202,8 +207,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `166` // Estimated: `11056` - // Minimum execution time: 17_287_000 picoseconds. - Weight::from_parts(17_678_000, 0) + // Minimum execution time: 17_273_000 picoseconds. + Weight::from_parts(17_712_000, 0) .saturating_add(Weight::from_parts(0, 11056)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -214,13 +219,15 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `173` // Estimated: `13538` - // Minimum execution time: 18_941_000 picoseconds. - Weight::from_parts(19_285_000, 0) + // Minimum execution time: 18_395_000 picoseconds. + Weight::from_parts(19_095_000, 0) .saturating_add(Weight::from_parts(0, 13538)) .saturating_add(T::DbWeight::get().reads(5)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -233,12 +240,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn notify_current_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `176` - // Estimated: `6116` - // Minimum execution time: 32_668_000 picoseconds. - Weight::from_parts(33_533_000, 0) - .saturating_add(Weight::from_parts(0, 6116)) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `212` + // Estimated: `6152` + // Minimum execution time: 27_343_000 picoseconds. + Weight::from_parts(28_068_000, 0) + .saturating_add(Weight::from_parts(0, 6152)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) @@ -247,8 +254,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `206` // Estimated: `8621` - // Minimum execution time: 9_182_000 picoseconds. - Weight::from_parts(9_498_000, 0) + // Minimum execution time: 9_156_000 picoseconds. + Weight::from_parts(9_552_000, 0) .saturating_add(Weight::from_parts(0, 8621)) .saturating_add(T::DbWeight::get().reads(3)) } @@ -258,14 +265,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `173` // Estimated: `11063` - // Minimum execution time: 17_519_000 picoseconds. 
- Weight::from_parts(17_943_000, 0) + // Minimum execution time: 17_454_000 picoseconds. + Weight::from_parts(17_831_000, 0) .saturating_add(Weight::from_parts(0, 11063)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -278,12 +287,38 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `179` - // Estimated: `11069` - // Minimum execution time: 38_680_000 picoseconds. - Weight::from_parts(39_984_000, 0) - .saturating_add(Weight::from_parts(0, 11069)) - .saturating_add(T::DbWeight::get().reads(9)) + // Measured: `215` + // Estimated: `11105` + // Minimum execution time: 34_299_000 picoseconds. + Weight::from_parts(35_156_000, 0) + .saturating_add(Weight::from_parts(0, 11105)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `1588` + // Minimum execution time: 4_508_000 picoseconds. + Weight::from_parts(4_702_000, 0) + .saturating_add(Weight::from_parts(0, 1588)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7740` + // Estimated: `11205` + // Minimum execution time: 26_557_000 picoseconds. + Weight::from_parts(26_980_000, 0) + .saturating_add(Weight::from_parts(0, 11205)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_xcm.rs index 0d3fe0adb1b..27867e278ed 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_xcm.rs @@ -1,42 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=asset-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-polkadot/src/weights/ +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=asset-hub-polkadot-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -49,6 +48,8 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -63,10 +64,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `75` // Estimated: `3540` - // Minimum execution time: 28_284_000 picoseconds. - Weight::from_parts(29_186_000, 0) + // Minimum execution time: 25_203_000 picoseconds. 
+ Weight::from_parts(25_927_000, 0) .saturating_add(Weight::from_parts(0, 3540)) - .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) @@ -75,8 +76,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1489` - // Minimum execution time: 24_830_000 picoseconds. - Weight::from_parts(26_312_000, 0) + // Minimum execution time: 20_113_000 picoseconds. + Weight::from_parts(20_439_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -86,8 +87,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1489` - // Minimum execution time: 18_584_000 picoseconds. - Weight::from_parts(19_083_000, 0) + // Minimum execution time: 14_959_000 picoseconds. + Weight::from_parts(15_264_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -107,8 +108,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_415_000 picoseconds. - Weight::from_parts(9_821_000, 0) + // Minimum execution time: 7_399_000 picoseconds. + Weight::from_parts(7_674_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -118,8 +119,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_902_000 picoseconds. - Weight::from_parts(3_377_000, 0) + // Minimum execution time: 2_388_000 picoseconds. + Weight::from_parts(2_522_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -127,6 +128,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -143,14 +146,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `75` // Estimated: `3540` - // Minimum execution time: 32_730_000 picoseconds. - Weight::from_parts(33_879_000, 0) + // Minimum execution time: 28_791_000 picoseconds. 
+ Weight::from_parts(29_443_000, 0) .saturating_add(Weight::from_parts(0, 3540)) - .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -165,12 +170,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `257` - // Estimated: `3722` - // Minimum execution time: 34_053_000 picoseconds. - Weight::from_parts(34_506_000, 0) - .saturating_add(Weight::from_parts(0, 3722)) - .saturating_add(T::DbWeight::get().reads(6)) + // Measured: `292` + // Estimated: `3757` + // Minimum execution time: 30_880_000 picoseconds. + Weight::from_parts(31_675_000, 0) + .saturating_add(Weight::from_parts(0, 3757)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) @@ -179,8 +184,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_824_000 picoseconds. - Weight::from_parts(2_986_000, 0) + // Minimum execution time: 2_365_000 picoseconds. + Weight::from_parts(2_550_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -190,8 +195,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `129` // Estimated: `11019` - // Minimum execution time: 17_011_000 picoseconds. - Weight::from_parts(17_488_000, 0) + // Minimum execution time: 17_185_000 picoseconds. + Weight::from_parts(17_680_000, 0) .saturating_add(Weight::from_parts(0, 11019)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -202,8 +207,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `133` // Estimated: `11023` - // Minimum execution time: 17_191_000 picoseconds. - Weight::from_parts(17_784_000, 0) + // Minimum execution time: 16_974_000 picoseconds. + Weight::from_parts(17_660_000, 0) .saturating_add(Weight::from_parts(0, 11023)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -214,13 +219,15 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `140` // Estimated: `13505` - // Minimum execution time: 18_625_000 picoseconds. - Weight::from_parts(19_177_000, 0) + // Minimum execution time: 18_536_000 picoseconds. 
+ Weight::from_parts(19_292_000, 0) .saturating_add(Weight::from_parts(0, 13505)) .saturating_add(T::DbWeight::get().reads(5)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -235,10 +242,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `6082` - // Minimum execution time: 30_762_000 picoseconds. - Weight::from_parts(31_481_000, 0) + // Minimum execution time: 27_368_000 picoseconds. + Weight::from_parts(28_161_000, 0) .saturating_add(Weight::from_parts(0, 6082)) - .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) @@ -247,8 +254,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `172` // Estimated: `8587` - // Minimum execution time: 9_025_000 picoseconds. - Weight::from_parts(9_423_000, 0) + // Minimum execution time: 9_553_000 picoseconds. + Weight::from_parts(9_899_000, 0) .saturating_add(Weight::from_parts(0, 8587)) .saturating_add(T::DbWeight::get().reads(3)) } @@ -258,14 +265,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `140` // Estimated: `11030` - // Minimum execution time: 17_550_000 picoseconds. - Weight::from_parts(17_939_000, 0) + // Minimum execution time: 17_445_000 picoseconds. + Weight::from_parts(18_206_000, 0) .saturating_add(Weight::from_parts(0, 11030)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -280,10 +289,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `146` // Estimated: `11036` - // Minimum execution time: 36_922_000 picoseconds. - Weight::from_parts(37_709_000, 0) + // Minimum execution time: 34_200_000 picoseconds. 
+ Weight::from_parts(35_198_000, 0) .saturating_add(Weight::from_parts(0, 11036)) - .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `69` + // Estimated: `1554` + // Minimum execution time: 4_679_000 picoseconds. + Weight::from_parts(4_841_000, 0) + .saturating_add(Weight::from_parts(0, 1554)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7706` + // Estimated: `11171` + // Minimum execution time: 27_281_000 picoseconds. + Weight::from_parts(27_694_000, 0) + .saturating_add(Weight::from_parts(0, 11171)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs index 909d7f28907..afe85fdaf28 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs @@ -17,27 +17,25 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=asset-hub-rococo-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-rococo/src/weights/ +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=asset-hub-rococo-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,6 +48,8 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. 
pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -62,35 +62,39 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 30_015_000 picoseconds. - Weight::from_parts(30_576_000, 0) - .saturating_add(Weight::from_parts(0, 3574)) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 24_498_000 picoseconds. + Weight::from_parts(25_385_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn teleport_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `1489` - // Minimum execution time: 24_785_000 picoseconds. - Weight::from_parts(25_097_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) + // Measured: `39` + // Estimated: `3504` + // Minimum execution time: 19_746_000 picoseconds. + Weight::from_parts(20_535_000, 0) + .saturating_add(Weight::from_parts(0, 3504)) + .saturating_add(T::DbWeight::get().reads(2)) } + /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn reserve_transfer_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `1489` - // Minimum execution time: 18_561_000 picoseconds. - Weight::from_parts(19_121_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) + // Measured: `39` + // Estimated: `3504` + // Minimum execution time: 15_059_000 picoseconds. + Weight::from_parts(15_386_000, 0) + .saturating_add(Weight::from_parts(0, 3504)) + .saturating_add(T::DbWeight::get().reads(2)) } /// Storage: `Benchmark::Override` (r:0 w:0) /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -108,8 +112,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_298_000 picoseconds. - Weight::from_parts(9_721_000, 0) + // Minimum execution time: 7_108_000 picoseconds. 
+ Weight::from_parts(7_458_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -119,8 +123,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_912_000 picoseconds. - Weight::from_parts(3_262_000, 0) + // Minimum execution time: 2_205_000 picoseconds. + Weight::from_parts(2_360_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -128,6 +132,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -142,16 +148,18 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_subscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 35_127_000 picoseconds. - Weight::from_parts(36_317_000, 0) - .saturating_add(Weight::from_parts(0, 3574)) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 29_099_000 picoseconds. + Weight::from_parts(29_580_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -166,12 +174,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `326` - // Estimated: `3791` - // Minimum execution time: 36_634_000 picoseconds. - Weight::from_parts(37_983_000, 0) - .saturating_add(Weight::from_parts(0, 3791)) - .saturating_add(T::DbWeight::get().reads(6)) + // Measured: `363` + // Estimated: `3828` + // Minimum execution time: 31_161_000 picoseconds. + Weight::from_parts(31_933_000, 0) + .saturating_add(Weight::from_parts(0, 3828)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) @@ -180,8 +188,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_940_000 picoseconds. 
- Weight::from_parts(3_085_000, 0) + // Minimum execution time: 2_158_000 picoseconds. + Weight::from_parts(2_316_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -191,8 +199,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `162` // Estimated: `11052` - // Minimum execution time: 17_400_000 picoseconds. - Weight::from_parts(17_759_000, 0) + // Minimum execution time: 16_934_000 picoseconds. + Weight::from_parts(17_655_000, 0) .saturating_add(Weight::from_parts(0, 11052)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -203,8 +211,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `166` // Estimated: `11056` - // Minimum execution time: 17_287_000 picoseconds. - Weight::from_parts(17_678_000, 0) + // Minimum execution time: 17_658_000 picoseconds. + Weight::from_parts(17_973_000, 0) .saturating_add(Weight::from_parts(0, 11056)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -215,13 +223,15 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `173` // Estimated: `13538` - // Minimum execution time: 18_941_000 picoseconds. - Weight::from_parts(19_285_000, 0) + // Minimum execution time: 18_673_000 picoseconds. + Weight::from_parts(19_027_000, 0) .saturating_add(Weight::from_parts(0, 13538)) .saturating_add(T::DbWeight::get().reads(5)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -234,12 +244,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn notify_current_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `176` - // Estimated: `6116` - // Minimum execution time: 32_668_000 picoseconds. - Weight::from_parts(33_533_000, 0) - .saturating_add(Weight::from_parts(0, 6116)) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `212` + // Estimated: `6152` + // Minimum execution time: 27_171_000 picoseconds. + Weight::from_parts(27_802_000, 0) + .saturating_add(Weight::from_parts(0, 6152)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) @@ -248,8 +258,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `206` // Estimated: `8621` - // Minimum execution time: 9_182_000 picoseconds. - Weight::from_parts(9_498_000, 0) + // Minimum execution time: 9_423_000 picoseconds. + Weight::from_parts(9_636_000, 0) .saturating_add(Weight::from_parts(0, 8621)) .saturating_add(T::DbWeight::get().reads(3)) } @@ -259,14 +269,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `173` // Estimated: `11063` - // Minimum execution time: 17_519_000 picoseconds. 
- Weight::from_parts(17_943_000, 0) + // Minimum execution time: 17_442_000 picoseconds. + Weight::from_parts(17_941_000, 0) .saturating_add(Weight::from_parts(0, 11063)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -279,12 +291,38 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `179` - // Estimated: `11069` - // Minimum execution time: 38_680_000 picoseconds. - Weight::from_parts(39_984_000, 0) - .saturating_add(Weight::from_parts(0, 11069)) - .saturating_add(T::DbWeight::get().reads(9)) + // Measured: `215` + // Estimated: `11105` + // Minimum execution time: 34_340_000 picoseconds. + Weight::from_parts(34_934_000, 0) + .saturating_add(Weight::from_parts(0, 11105)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `1588` + // Minimum execution time: 5_496_000 picoseconds. + Weight::from_parts(5_652_000, 0) + .saturating_add(Weight::from_parts(0, 1588)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7740` + // Estimated: `11205` + // Minimum execution time: 26_140_000 picoseconds. + Weight::from_parts(26_824_000, 0) + .saturating_add(Weight::from_parts(0, 11205)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs index 5c97d358591..340edafb0b0 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs @@ -1,42 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=asset-hub-westend-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-westend/src/weights/ +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=asset-hub-westend-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -49,6 +48,8 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -63,10 +64,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 29_833_000 picoseconds. - Weight::from_parts(30_472_000, 0) + // Minimum execution time: 25_534_000 picoseconds. 
+ Weight::from_parts(26_413_000, 0) .saturating_add(Weight::from_parts(0, 3610)) - .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) @@ -75,8 +76,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1489` - // Minimum execution time: 22_922_000 picoseconds. - Weight::from_parts(23_650_000, 0) + // Minimum execution time: 20_513_000 picoseconds. + Weight::from_parts(20_837_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -86,8 +87,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1489` - // Minimum execution time: 17_468_000 picoseconds. - Weight::from_parts(18_068_000, 0) + // Minimum execution time: 14_977_000 picoseconds. + Weight::from_parts(15_207_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -95,8 +96,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_780_000 picoseconds. - Weight::from_parts(9_201_000, 0) + // Minimum execution time: 7_440_000 picoseconds. + Weight::from_parts(7_651_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) @@ -105,8 +106,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_886_000 picoseconds. - Weight::from_parts(9_102_000, 0) + // Minimum execution time: 7_253_000 picoseconds. + Weight::from_parts(7_584_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -116,8 +117,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_665_000 picoseconds. - Weight::from_parts(2_884_000, 0) + // Minimum execution time: 2_299_000 picoseconds. + Weight::from_parts(2_435_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -125,6 +126,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -141,14 +144,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 34_513_000 picoseconds. - Weight::from_parts(36_207_000, 0) + // Minimum execution time: 29_440_000 picoseconds. 
+ Weight::from_parts(30_675_000, 0) .saturating_add(Weight::from_parts(0, 3610)) - .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -165,10 +170,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `363` // Estimated: `3828` - // Minimum execution time: 35_770_000 picoseconds. - Weight::from_parts(36_462_000, 0) + // Minimum execution time: 31_876_000 picoseconds. + Weight::from_parts(32_588_000, 0) .saturating_add(Weight::from_parts(0, 3828)) - .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) @@ -177,8 +182,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_763_000 picoseconds. - Weight::from_parts(3_079_000, 0) + // Minimum execution time: 2_385_000 picoseconds. + Weight::from_parts(2_607_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -188,8 +193,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `162` // Estimated: `11052` - // Minimum execution time: 17_170_000 picoseconds. - Weight::from_parts(17_674_000, 0) + // Minimum execution time: 16_927_000 picoseconds. + Weight::from_parts(17_554_000, 0) .saturating_add(Weight::from_parts(0, 11052)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -200,8 +205,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `166` // Estimated: `11056` - // Minimum execution time: 16_857_000 picoseconds. - Weight::from_parts(17_407_000, 0) + // Minimum execution time: 16_965_000 picoseconds. + Weight::from_parts(17_807_000, 0) .saturating_add(Weight::from_parts(0, 11056)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -212,13 +217,15 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `173` // Estimated: `13538` - // Minimum execution time: 19_040_000 picoseconds. - Weight::from_parts(19_550_000, 0) + // Minimum execution time: 18_763_000 picoseconds. 
+ Weight::from_parts(19_359_000, 0) .saturating_add(Weight::from_parts(0, 13538)) .saturating_add(T::DbWeight::get().reads(5)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -233,10 +240,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `6152` - // Minimum execution time: 31_623_000 picoseconds. - Weight::from_parts(32_646_000, 0) + // Minimum execution time: 27_371_000 picoseconds. + Weight::from_parts(28_185_000, 0) .saturating_add(Weight::from_parts(0, 6152)) - .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) @@ -245,8 +252,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `206` // Estimated: `8621` - // Minimum execution time: 9_148_000 picoseconds. - Weight::from_parts(9_402_000, 0) + // Minimum execution time: 9_165_000 picoseconds. + Weight::from_parts(9_539_000, 0) .saturating_add(Weight::from_parts(0, 8621)) .saturating_add(T::DbWeight::get().reads(3)) } @@ -256,14 +263,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `173` // Estimated: `11063` - // Minimum execution time: 17_630_000 picoseconds. - Weight::from_parts(17_941_000, 0) + // Minimum execution time: 17_384_000 picoseconds. + Weight::from_parts(17_777_000, 0) .saturating_add(Weight::from_parts(0, 11063)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -278,10 +287,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `215` // Estimated: `11105` - // Minimum execution time: 38_425_000 picoseconds. - Weight::from_parts(39_219_000, 0) + // Minimum execution time: 34_260_000 picoseconds. 
+ Weight::from_parts(35_428_000, 0) .saturating_add(Weight::from_parts(0, 11105)) - .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `1588` + // Minimum execution time: 4_710_000 picoseconds. + Weight::from_parts(4_900_000, 0) + .saturating_add(Weight::from_parts(0, 1588)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7740` + // Estimated: `11205` + // Minimum execution time: 26_843_000 picoseconds. + Weight::from_parts(27_404_000, 0) + .saturating_add(Weight::from_parts(0, 11205)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_xcm.rs index 730bc492684..7f4c2026f2b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_xcm.rs @@ -1,42 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-kusama-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-kusama-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=bridge-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/ +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=bridge-hub-kusama-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -49,6 +48,8 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -63,10 +64,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 27_523_000 picoseconds. - Weight::from_parts(28_238_000, 0) + // Minimum execution time: 22_520_000 picoseconds. + Weight::from_parts(23_167_000, 0) .saturating_add(Weight::from_parts(0, 3503)) - .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) @@ -75,8 +76,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1489` - // Minimum execution time: 24_139_000 picoseconds. - Weight::from_parts(24_806_000, 0) + // Minimum execution time: 19_639_000 picoseconds. + Weight::from_parts(20_230_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -106,8 +107,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_988_000 picoseconds. - Weight::from_parts(9_227_000, 0) + // Minimum execution time: 7_175_000 picoseconds. + Weight::from_parts(7_496_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -117,8 +118,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_571_000 picoseconds. - Weight::from_parts(2_667_000, 0) + // Minimum execution time: 2_126_000 picoseconds. 
+ Weight::from_parts(2_359_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -126,6 +127,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -142,14 +145,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 33_194_000 picoseconds. - Weight::from_parts(34_089_000, 0) + // Minimum execution time: 27_229_000 picoseconds. + Weight::from_parts(27_673_000, 0) .saturating_add(Weight::from_parts(0, 3503)) - .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -166,10 +171,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `255` // Estimated: `3720` - // Minimum execution time: 35_413_000 picoseconds. - Weight::from_parts(36_359_000, 0) + // Minimum execution time: 29_812_000 picoseconds. + Weight::from_parts(30_649_000, 0) .saturating_add(Weight::from_parts(0, 3720)) - .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) @@ -178,8 +183,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_679_000 picoseconds. - Weight::from_parts(2_823_000, 0) + // Minimum execution time: 2_212_000 picoseconds. + Weight::from_parts(2_367_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -189,8 +194,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `95` // Estimated: `10985` - // Minimum execution time: 15_117_000 picoseconds. - Weight::from_parts(15_603_000, 0) + // Minimum execution time: 14_768_000 picoseconds. + Weight::from_parts(15_036_000, 0) .saturating_add(Weight::from_parts(0, 10985)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -201,8 +206,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `99` // Estimated: `10989` - // Minimum execution time: 14_978_000 picoseconds. 
- Weight::from_parts(15_370_000, 0) + // Minimum execution time: 14_662_000 picoseconds. + Weight::from_parts(15_155_000, 0) .saturating_add(Weight::from_parts(0, 10989)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -213,13 +218,15 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `13471` - // Minimum execution time: 16_549_000 picoseconds. - Weight::from_parts(16_944_000, 0) + // Minimum execution time: 16_198_000 picoseconds. + Weight::from_parts(16_456_000, 0) .saturating_add(Weight::from_parts(0, 13471)) .saturating_add(T::DbWeight::get().reads(5)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -234,10 +241,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 30_111_000 picoseconds. - Weight::from_parts(30_795_000, 0) + // Minimum execution time: 25_825_000 picoseconds. + Weight::from_parts(26_744_000, 0) .saturating_add(Weight::from_parts(0, 6046)) - .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) @@ -247,7 +254,7 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Measured: `136` // Estimated: `8551` // Minimum execution time: 8_622_000 picoseconds. - Weight::from_parts(8_865_000, 0) + Weight::from_parts(8_931_000, 0) .saturating_add(Weight::from_parts(0, 8551)) .saturating_add(T::DbWeight::get().reads(3)) } @@ -257,14 +264,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `10996` - // Minimum execution time: 15_194_000 picoseconds. - Weight::from_parts(15_646_000, 0) + // Minimum execution time: 15_397_000 picoseconds. + Weight::from_parts(15_650_000, 0) .saturating_add(Weight::from_parts(0, 10996)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -279,10 +288,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `112` // Estimated: `11002` - // Minimum execution time: 36_625_000 picoseconds. - Weight::from_parts(37_571_000, 0) + // Minimum execution time: 32_330_000 picoseconds. 
+ Weight::from_parts(33_255_000, 0) .saturating_add(Weight::from_parts(0, 11002)) - .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `32` + // Estimated: `1517` + // Minimum execution time: 4_142_000 picoseconds. + Weight::from_parts(4_308_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7669` + // Estimated: `11134` + // Minimum execution time: 25_814_000 picoseconds. + Weight::from_parts(26_213_000, 0) + .saturating_add(Weight::from_parts(0, 11134)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_xcm.rs index 98dd7e36f07..b73c009cbda 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_xcm.rs @@ -1,42 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-polkadot-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-polkadot-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=bridge-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/ +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=bridge-hub-polkadot-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -49,6 +48,8 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -63,10 +64,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 25_510_000 picoseconds. - Weight::from_parts(25_755_000, 0) + // Minimum execution time: 22_442_000 picoseconds. + Weight::from_parts(23_346_000, 0) .saturating_add(Weight::from_parts(0, 3503)) - .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) @@ -75,8 +76,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1489` - // Minimum execution time: 24_125_000 picoseconds. - Weight::from_parts(25_559_000, 0) + // Minimum execution time: 19_655_000 picoseconds. + Weight::from_parts(20_086_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -106,8 +107,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_625_000 picoseconds. - Weight::from_parts(9_232_000, 0) + // Minimum execution time: 6_858_000 picoseconds. + Weight::from_parts(7_225_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -117,8 +118,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_690_000 picoseconds. - Weight::from_parts(2_906_000, 0) + // Minimum execution time: 2_099_000 picoseconds. 
+ Weight::from_parts(2_190_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -126,6 +127,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -142,14 +145,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 30_131_000 picoseconds. - Weight::from_parts(31_138_000, 0) + // Minimum execution time: 27_073_000 picoseconds. + Weight::from_parts(27_584_000, 0) .saturating_add(Weight::from_parts(0, 3503)) - .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -164,12 +169,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `220` - // Estimated: `3685` - // Minimum execution time: 32_411_000 picoseconds. - Weight::from_parts(33_009_000, 0) - .saturating_add(Weight::from_parts(0, 3685)) - .saturating_add(T::DbWeight::get().reads(6)) + // Measured: `255` + // Estimated: `3720` + // Minimum execution time: 29_949_000 picoseconds. + Weight::from_parts(30_760_000, 0) + .saturating_add(Weight::from_parts(0, 3720)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) @@ -178,8 +183,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_548_000 picoseconds. - Weight::from_parts(2_727_000, 0) + // Minimum execution time: 2_192_000 picoseconds. + Weight::from_parts(2_276_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -189,8 +194,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `95` // Estimated: `10985` - // Minimum execution time: 15_298_000 picoseconds. - Weight::from_parts(15_964_000, 0) + // Minimum execution time: 14_681_000 picoseconds. 
+ Weight::from_parts(15_131_000, 0) .saturating_add(Weight::from_parts(0, 10985)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -201,8 +206,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `99` // Estimated: `10989` - // Minimum execution time: 14_927_000 picoseconds. - Weight::from_parts(15_528_000, 0) + // Minimum execution time: 14_523_000 picoseconds. + Weight::from_parts(15_113_000, 0) .saturating_add(Weight::from_parts(0, 10989)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -213,13 +218,15 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `13471` - // Minimum execution time: 16_409_000 picoseconds. - Weight::from_parts(16_960_000, 0) + // Minimum execution time: 15_989_000 picoseconds. + Weight::from_parts(16_518_000, 0) .saturating_add(Weight::from_parts(0, 13471)) .saturating_add(T::DbWeight::get().reads(5)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -234,10 +241,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 28_204_000 picoseconds. - Weight::from_parts(28_641_000, 0) + // Minimum execution time: 25_127_000 picoseconds. + Weight::from_parts(25_773_000, 0) .saturating_add(Weight::from_parts(0, 6046)) - .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) @@ -246,8 +253,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `136` // Estimated: `8551` - // Minimum execution time: 8_576_000 picoseconds. - Weight::from_parts(8_895_000, 0) + // Minimum execution time: 8_352_000 picoseconds. + Weight::from_parts(8_592_000, 0) .saturating_add(Weight::from_parts(0, 8551)) .saturating_add(T::DbWeight::get().reads(3)) } @@ -257,14 +264,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `10996` - // Minimum execution time: 15_263_000 picoseconds. - Weight::from_parts(15_726_000, 0) + // Minimum execution time: 14_658_000 picoseconds. 
+ Weight::from_parts(15_345_000, 0) .saturating_add(Weight::from_parts(0, 10996)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -279,10 +288,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `112` // Estimated: `11002` - // Minimum execution time: 34_186_000 picoseconds. - Weight::from_parts(35_204_000, 0) + // Minimum execution time: 31_478_000 picoseconds. + Weight::from_parts(32_669_000, 0) .saturating_add(Weight::from_parts(0, 11002)) - .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `32` + // Estimated: `1517` + // Minimum execution time: 4_066_000 picoseconds. + Weight::from_parts(4_267_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7669` + // Estimated: `11134` + // Minimum execution time: 25_260_000 picoseconds. + Weight::from_parts(25_570_000, 0) + .saturating_add(Weight::from_parts(0, 11134)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs index a6e093c4b94..5aa4999c624 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs @@ -1,42 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=bridge-hub-rococo-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=bridge-hub-rococo-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -49,6 +48,8 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -63,22 +64,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `75` // Estimated: `3540` - // Minimum execution time: 29_724_000 picoseconds. - Weight::from_parts(30_440_000, 0) + // Minimum execution time: 24_179_000 picoseconds. 
+ Weight::from_parts(24_684_000, 0) .saturating_add(Weight::from_parts(0, 3540)) - .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn teleport_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `32` - // Estimated: `1489` - // Minimum execution time: 26_779_000 picoseconds. - Weight::from_parts(27_249_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) + // Measured: `38` + // Estimated: `3503` + // Minimum execution time: 21_093_000 picoseconds. + Weight::from_parts(21_523_000, 0) + .saturating_add(Weight::from_parts(0, 3503)) + .saturating_add(T::DbWeight::get().reads(2)) } /// Storage: `Benchmark::Override` (r:0 w:0) /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -106,8 +109,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_170_000 picoseconds. - Weight::from_parts(9_629_000, 0) + // Minimum execution time: 6_938_000 picoseconds. + Weight::from_parts(7_243_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -117,8 +120,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_769_000 picoseconds. - Weight::from_parts(2_933_000, 0) + // Minimum execution time: 2_159_000 picoseconds. + Weight::from_parts(2_290_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -126,6 +129,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -142,14 +147,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `75` // Estimated: `3540` - // Minimum execution time: 34_547_000 picoseconds. - Weight::from_parts(35_653_000, 0) + // Minimum execution time: 28_337_000 picoseconds. 
+ Weight::from_parts(29_265_000, 0) .saturating_add(Weight::from_parts(0, 3540)) - .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -166,10 +173,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `292` // Estimated: `3757` - // Minimum execution time: 36_274_000 picoseconds. - Weight::from_parts(37_281_000, 0) + // Minimum execution time: 30_599_000 picoseconds. + Weight::from_parts(31_272_000, 0) .saturating_add(Weight::from_parts(0, 3757)) - .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) @@ -178,8 +185,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_749_000 picoseconds. - Weight::from_parts(2_917_000, 0) + // Minimum execution time: 2_132_000 picoseconds. + Weight::from_parts(2_280_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -189,8 +196,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `187` // Estimated: `11077` - // Minimum execution time: 17_649_000 picoseconds. - Weight::from_parts(17_964_000, 0) + // Minimum execution time: 18_262_000 picoseconds. + Weight::from_parts(18_640_000, 0) .saturating_add(Weight::from_parts(0, 11077)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -201,8 +208,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `191` // Estimated: `11081` - // Minimum execution time: 17_551_000 picoseconds. - Weight::from_parts(18_176_000, 0) + // Minimum execution time: 18_512_000 picoseconds. + Weight::from_parts(18_888_000, 0) .saturating_add(Weight::from_parts(0, 11081)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -213,13 +220,15 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `198` // Estimated: `13563` - // Minimum execution time: 19_261_000 picoseconds. - Weight::from_parts(19_714_000, 0) + // Minimum execution time: 19_362_000 picoseconds. 
+ Weight::from_parts(20_056_000, 0) .saturating_add(Weight::from_parts(0, 13563)) .saturating_add(T::DbWeight::get().reads(5)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -234,10 +243,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `6082` - // Minimum execution time: 31_630_000 picoseconds. - Weight::from_parts(32_340_000, 0) + // Minimum execution time: 27_318_000 picoseconds. + Weight::from_parts(28_075_000, 0) .saturating_add(Weight::from_parts(0, 6082)) - .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) @@ -246,8 +255,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `172` // Estimated: `8587` - // Minimum execution time: 9_218_000 picoseconds. - Weight::from_parts(9_558_000, 0) + // Minimum execution time: 9_930_000 picoseconds. + Weight::from_parts(10_192_000, 0) .saturating_add(Weight::from_parts(0, 8587)) .saturating_add(T::DbWeight::get().reads(3)) } @@ -257,14 +266,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `198` // Estimated: `11088` - // Minimum execution time: 18_133_000 picoseconds. - Weight::from_parts(18_663_000, 0) + // Minimum execution time: 18_305_000 picoseconds. + Weight::from_parts(18_738_000, 0) .saturating_add(Weight::from_parts(0, 11088)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -279,10 +290,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `204` // Estimated: `11094` - // Minimum execution time: 38_878_000 picoseconds. - Weight::from_parts(39_779_000, 0) + // Minimum execution time: 34_559_000 picoseconds. 
+ Weight::from_parts(35_241_000, 0) .saturating_add(Weight::from_parts(0, 11094)) - .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `69` + // Estimated: `1554` + // Minimum execution time: 4_512_000 picoseconds. + Weight::from_parts(4_671_000, 0) + .saturating_add(Weight::from_parts(0, 1554)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7706` + // Estimated: `11171` + // Minimum execution time: 26_473_000 picoseconds. + Weight::from_parts(26_960_000, 0) + .saturating_add(Weight::from_parts(0, 11171)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs index 72bdb282585..9f17d327024 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs @@ -286,4 +286,31 @@ impl pallet_xcm::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `32` + // Estimated: `1517` + // Minimum execution time: 4_142_000 picoseconds. + Weight::from_parts(4_308_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7669` + // Estimated: `11134` + // Minimum execution time: 25_814_000 picoseconds. + Weight::from_parts(26_213_000, 0) + .saturating_add(Weight::from_parts(0, 11134)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } + diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_xcm.rs index 26e668854f2..57e50284147 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_xcm.rs @@ -1,42 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=collectives-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=collectives-polkadot-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -49,6 +48,8 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. 
pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -61,22 +62,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: - // Measured: `111` - // Estimated: `3576` - // Minimum execution time: 27_795_000 picoseconds. - Weight::from_parts(28_215_000, 0) - .saturating_add(Weight::from_parts(0, 3576)) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 25_050_000 picoseconds. + Weight::from_parts(26_382_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn teleport_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `32` + // Measured: `69` // Estimated: `1489` - // Minimum execution time: 23_847_000 picoseconds. - Weight::from_parts(24_332_000, 0) + // Minimum execution time: 21_625_000 picoseconds. + Weight::from_parts(22_076_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -106,8 +107,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_885_000 picoseconds. - Weight::from_parts(9_128_000, 0) + // Minimum execution time: 7_076_000 picoseconds. + Weight::from_parts(7_378_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -117,8 +118,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_670_000 picoseconds. - Weight::from_parts(2_815_000, 0) + // Minimum execution time: 2_327_000 picoseconds. 
+ Weight::from_parts(2_454_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -126,6 +127,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -140,16 +143,18 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_subscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `111` - // Estimated: `3576` - // Minimum execution time: 32_214_000 picoseconds. - Weight::from_parts(32_989_000, 0) - .saturating_add(Weight::from_parts(0, 3576)) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 29_080_000 picoseconds. + Weight::from_parts(29_886_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -164,12 +169,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `294` - // Estimated: `3759` - // Minimum execution time: 33_638_000 picoseconds. - Weight::from_parts(34_206_000, 0) - .saturating_add(Weight::from_parts(0, 3759)) - .saturating_add(T::DbWeight::get().reads(6)) + // Measured: `363` + // Estimated: `3828` + // Minimum execution time: 30_746_000 picoseconds. + Weight::from_parts(31_631_000, 0) + .saturating_add(Weight::from_parts(0, 3828)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) @@ -178,8 +183,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_602_000 picoseconds. - Weight::from_parts(2_730_000, 0) + // Minimum execution time: 2_208_000 picoseconds. 
+ Weight::from_parts(2_341_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -187,11 +192,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `129` - // Estimated: `11019` - // Minimum execution time: 16_199_000 picoseconds. - Weight::from_parts(16_833_000, 0) - .saturating_add(Weight::from_parts(0, 11019)) + // Measured: `162` + // Estimated: `11052` + // Minimum execution time: 16_239_000 picoseconds. + Weight::from_parts(16_881_000, 0) + .saturating_add(Weight::from_parts(0, 11052)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -199,11 +204,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `133` - // Estimated: `11023` - // Minimum execution time: 16_561_000 picoseconds. - Weight::from_parts(16_872_000, 0) - .saturating_add(Weight::from_parts(0, 11023)) + // Measured: `166` + // Estimated: `11056` + // Minimum execution time: 16_711_000 picoseconds. + Weight::from_parts(16_944_000, 0) + .saturating_add(Weight::from_parts(0, 11056)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -211,15 +216,17 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: - // Measured: `140` - // Estimated: `13505` - // Minimum execution time: 17_812_000 picoseconds. - Weight::from_parts(20_036_000, 0) - .saturating_add(Weight::from_parts(0, 13505)) + // Measured: `173` + // Estimated: `13538` + // Minimum execution time: 18_142_000 picoseconds. + Weight::from_parts(18_470_000, 0) + .saturating_add(Weight::from_parts(0, 13538)) .saturating_add(T::DbWeight::get().reads(5)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -232,39 +239,41 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn notify_current_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `178` - // Estimated: `6118` - // Minimum execution time: 30_153_000 picoseconds. - Weight::from_parts(31_366_000, 0) - .saturating_add(Weight::from_parts(0, 6118)) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `212` + // Estimated: `6152` + // Minimum execution time: 27_687_000 picoseconds. 
+ Weight::from_parts(28_250_000, 0) + .saturating_add(Weight::from_parts(0, 6152)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `172` - // Estimated: `8587` - // Minimum execution time: 9_465_000 picoseconds. - Weight::from_parts(9_743_000, 0) - .saturating_add(Weight::from_parts(0, 8587)) + // Measured: `206` + // Estimated: `8621` + // Minimum execution time: 9_675_000 picoseconds. + Weight::from_parts(9_992_000, 0) + .saturating_add(Weight::from_parts(0, 8621)) .saturating_add(T::DbWeight::get().reads(3)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `140` - // Estimated: `11030` - // Minimum execution time: 16_954_000 picoseconds. - Weight::from_parts(19_772_000, 0) - .saturating_add(Weight::from_parts(0, 11030)) + // Measured: `173` + // Estimated: `11063` + // Minimum execution time: 16_597_000 picoseconds. + Weight::from_parts(17_248_000, 0) + .saturating_add(Weight::from_parts(0, 11063)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -277,12 +286,38 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `182` - // Estimated: `11072` - // Minimum execution time: 37_302_000 picoseconds. - Weight::from_parts(38_124_000, 0) - .saturating_add(Weight::from_parts(0, 11072)) - .saturating_add(T::DbWeight::get().reads(9)) + // Measured: `215` + // Estimated: `11105` + // Minimum execution time: 34_649_000 picoseconds. + Weight::from_parts(35_475_000, 0) + .saturating_add(Weight::from_parts(0, 11105)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `1588` + // Minimum execution time: 4_619_000 picoseconds. 
+ Weight::from_parts(4_756_000, 0) + .saturating_add(Weight::from_parts(0, 1588)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7740` + // Estimated: `11205` + // Minimum execution time: 26_721_000 picoseconds. + Weight::from_parts(27_412_000, 0) + .saturating_add(Weight::from_parts(0, 11205)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_xcm.rs b/polkadot/runtime/rococo/src/weights/pallet_xcm.rs index 43b4358b890..aafded3f731 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_xcm.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_xcm.rs @@ -17,10 +17,10 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot @@ -29,14 +29,13 @@ // --steps=50 // --repeat=20 // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=pallet_xcm // --chain=rococo-dev -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -49,58 +48,56 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. 
pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: - // Measured: `565` - // Estimated: `4030` - // Minimum execution time: 37_039_000 picoseconds. - Weight::from_parts(37_605_000, 0) - .saturating_add(Weight::from_parts(0, 4030)) - .saturating_add(T::DbWeight::get().reads(7)) - .saturating_add(T::DbWeight::get().writes(3)) + // Measured: `142` + // Estimated: `3607` + // Minimum execution time: 27_328_000 picoseconds. + Weight::from_parts(27_976_000, 0) + .saturating_add(Weight::from_parts(0, 3607)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) } fn teleport_assets() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 21_646_000 picoseconds. - Weight::from_parts(22_119_000, 0) + // Minimum execution time: 16_280_000 picoseconds. + Weight::from_parts(16_904_000, 0) .saturating_add(Weight::from_parts(0, 0)) } fn reserve_transfer_assets() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 21_353_000 picoseconds. - Weight::from_parts(21_768_000, 0) + // Minimum execution time: 15_869_000 picoseconds. + Weight::from_parts(16_264_000, 0) .saturating_add(Weight::from_parts(0, 0)) } fn execute() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_942_000 picoseconds. - Weight::from_parts(10_110_000, 0) + // Minimum execution time: 6_923_000 picoseconds. + Weight::from_parts(7_432_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: XcmPallet SupportedVersion (r:0 w:1) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::SupportedVersion` (r:0 w:1) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_951_000 picoseconds. 
- Weight::from_parts(10_182_000, 0) + // Minimum execution time: 7_333_000 picoseconds. + Weight::from_parts(7_566_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -108,171 +105,189 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_163_000 picoseconds. - Weight::from_parts(3_298_000, 0) + // Minimum execution time: 2_219_000 picoseconds. + Weight::from_parts(2_375_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: XcmPallet VersionNotifiers (r:1 w:1) - /// Proof Skipped: XcmPallet VersionNotifiers (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet QueryCounter (r:1 w:1) - /// Proof Skipped: XcmPallet QueryCounter (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet Queries (r:0 w:1) - /// Proof Skipped: XcmPallet Queries (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) + /// Proof: `XcmPallet::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::QueryCounter` (r:1 w:1) + /// Proof: `XcmPallet::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::Queries` (r:0 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_subscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `565` - // Estimated: `4030` - // Minimum execution time: 41_207_000 picoseconds. - Weight::from_parts(41_879_000, 0) - .saturating_add(Weight::from_parts(0, 4030)) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(6)) + // Measured: `142` + // Estimated: `3607` + // Minimum execution time: 30_650_000 picoseconds. 
+ Weight::from_parts(31_683_000, 0) + .saturating_add(Weight::from_parts(0, 3607)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: XcmPallet VersionNotifiers (r:1 w:1) - /// Proof Skipped: XcmPallet VersionNotifiers (max_values: None, max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet Queries (r:0 w:1) - /// Proof Skipped: XcmPallet Queries (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) + /// Proof: `XcmPallet::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::Queries` (r:0 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `837` - // Estimated: `4302` - // Minimum execution time: 44_763_000 picoseconds. - Weight::from_parts(45_368_000, 0) - .saturating_add(Weight::from_parts(0, 4302)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(5)) + // Measured: `322` + // Estimated: `3787` + // Minimum execution time: 37_666_000 picoseconds. + Weight::from_parts(38_920_000, 0) + .saturating_add(Weight::from_parts(0, 3787)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: XcmPallet XcmExecutionSuspended (r:0 w:1) - /// Proof Skipped: XcmPallet XcmExecutionSuspended (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `XcmPallet::XcmExecutionSuspended` (r:0 w:1) + /// Proof: `XcmPallet::XcmExecutionSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn force_suspension() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_089_000 picoseconds. - Weight::from_parts(3_246_000, 0) + // Minimum execution time: 2_244_000 picoseconds. 
+ Weight::from_parts(2_425_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: XcmPallet SupportedVersion (r:4 w:2) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::SupportedVersion` (r:4 w:2) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `229` - // Estimated: `11119` - // Minimum execution time: 16_733_000 picoseconds. - Weight::from_parts(17_354_000, 0) - .saturating_add(Weight::from_parts(0, 11119)) + // Measured: `26` + // Estimated: `10916` + // Minimum execution time: 14_710_000 picoseconds. + Weight::from_parts(15_156_000, 0) + .saturating_add(Weight::from_parts(0, 10916)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: XcmPallet VersionNotifiers (r:4 w:2) - /// Proof Skipped: XcmPallet VersionNotifiers (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifiers` (r:4 w:2) + /// Proof: `XcmPallet::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `233` - // Estimated: `11123` - // Minimum execution time: 16_959_000 picoseconds. - Weight::from_parts(17_306_000, 0) - .saturating_add(Weight::from_parts(0, 11123)) + // Measured: `30` + // Estimated: `10920` + // Minimum execution time: 14_630_000 picoseconds. + Weight::from_parts(15_290_000, 0) + .saturating_add(Weight::from_parts(0, 10920)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: XcmPallet VersionNotifyTargets (r:5 w:0) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:0) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `13608` - // Minimum execution time: 17_964_000 picoseconds. - Weight::from_parts(18_548_000, 0) - .saturating_add(Weight::from_parts(0, 13608)) + // Measured: `40` + // Estimated: `13405` + // Minimum execution time: 16_686_000 picoseconds. 
+ Weight::from_parts(17_332_000, 0) + .saturating_add(Weight::from_parts(0, 13405)) .saturating_add(T::DbWeight::get().reads(5)) } - /// Storage: XcmPallet VersionNotifyTargets (r:2 w:1) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:2 w:1) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_current_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `635` - // Estimated: `6575` - // Minimum execution time: 39_436_000 picoseconds. - Weight::from_parts(39_669_000, 0) - .saturating_add(Weight::from_parts(0, 6575)) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(4)) + // Measured: `178` + // Estimated: `6118` + // Minimum execution time: 30_180_000 picoseconds. + Weight::from_parts(31_351_000, 0) + .saturating_add(Weight::from_parts(0, 6118)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: XcmPallet VersionNotifyTargets (r:3 w:0) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:3 w:0) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `272` - // Estimated: `8687` - // Minimum execution time: 8_991_000 picoseconds. - Weight::from_parts(9_248_000, 0) - .saturating_add(Weight::from_parts(0, 8687)) + // Measured: `69` + // Estimated: `8484` + // Minimum execution time: 9_624_000 picoseconds. 
+ Weight::from_parts(10_029_000, 0) + .saturating_add(Weight::from_parts(0, 8484)) .saturating_add(T::DbWeight::get().reads(3)) } - /// Storage: XcmPallet VersionNotifyTargets (r:4 w:2) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:4 w:2) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `240` - // Estimated: `11130` - // Minimum execution time: 17_614_000 picoseconds. - Weight::from_parts(17_948_000, 0) - .saturating_add(Weight::from_parts(0, 11130)) + // Measured: `37` + // Estimated: `10927` + // Minimum execution time: 15_139_000 picoseconds. + Weight::from_parts(15_575_000, 0) + .saturating_add(Weight::from_parts(0, 10927)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: XcmPallet VersionNotifyTargets (r:4 w:2) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:4 w:2) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `639` - // Estimated: `11529` - // Minimum execution time: 45_531_000 picoseconds. - Weight::from_parts(46_533_000, 0) - .saturating_add(Weight::from_parts(0, 11529)) - .saturating_add(T::DbWeight::get().reads(11)) - .saturating_add(T::DbWeight::get().writes(5)) + // Measured: `182` + // Estimated: `11072` + // Minimum execution time: 37_871_000 picoseconds. 
+ Weight::from_parts(38_940_000, 0) + .saturating_add(Weight::from_parts(0, 11072)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `XcmPallet::QueryCounter` (r:1 w:1) + /// Proof: `XcmPallet::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::Queries` (r:0 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `1485` + // Minimum execution time: 2_732_000 picoseconds. + Weight::from_parts(2_892_000, 0) + .saturating_add(Weight::from_parts(0, 1485)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `XcmPallet::Queries` (r:1 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7576` + // Estimated: `11041` + // Minimum execution time: 23_813_000 picoseconds. + Weight::from_parts(24_201_000, 0) + .saturating_add(Weight::from_parts(0, 11041)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) } } diff --git a/polkadot/runtime/westend/src/weights/pallet_xcm.rs b/polkadot/runtime/westend/src/weights/pallet_xcm.rs index 7f2a1de44e9..cca4bdbd91e 100644 --- a/polkadot/runtime/westend/src/weights/pallet_xcm.rs +++ b/polkadot/runtime/westend/src/weights/pallet_xcm.rs @@ -17,15 +17,16 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner--ss9ysm1-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=westend-dev // --steps=50 // --repeat=20 // --no-storage-info @@ -35,12 +36,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/polkadot/.git/.artifacts/bench.json -// --pallet=pallet_xcm -// --chain=westend-dev -// --header=./file_header.txt -// --output=./runtime/westend/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -53,44 +50,42 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. 
pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: - // Measured: `169` - // Estimated: `3634` - // Minimum execution time: 33_628_000 picoseconds. - Weight::from_parts(34_633_000, 0) - .saturating_add(Weight::from_parts(0, 3634)) - .saturating_add(T::DbWeight::get().reads(7)) - .saturating_add(T::DbWeight::get().writes(4)) + // Measured: `109` + // Estimated: `3574` + // Minimum execution time: 28_098_000 picoseconds. + Weight::from_parts(28_887_000, 0) + .saturating_add(Weight::from_parts(0, 3574)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) } fn teleport_assets() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 21_535_000 picoseconds. - Weight::from_parts(21_936_000, 0) + // Minimum execution time: 17_609_000 picoseconds. + Weight::from_parts(18_000_000, 0) .saturating_add(Weight::from_parts(0, 0)) } fn reserve_transfer_assets() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 21_576_000 picoseconds. - Weight::from_parts(21_942_000, 0) + // Minimum execution time: 17_007_000 picoseconds. 
+ Weight::from_parts(17_471_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: Benchmark Override (r:0 w:0) - /// Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured) + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) fn execute() -> Weight { // Proof Size summary in bytes: // Measured: `0` @@ -99,14 +94,14 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: XcmPallet SupportedVersion (r:0 w:1) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::SupportedVersion` (r:0 w:1) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_764_000 picoseconds. - Weight::from_parts(9_927_000, 0) + // Minimum execution time: 7_444_000 picoseconds. + Weight::from_parts(7_671_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -114,171 +109,189 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_824_000 picoseconds. - Weight::from_parts(2_935_000, 0) + // Minimum execution time: 2_126_000 picoseconds. + Weight::from_parts(2_253_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: XcmPallet VersionNotifiers (r:1 w:1) - /// Proof Skipped: XcmPallet VersionNotifiers (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet QueryCounter (r:1 w:1) - /// Proof Skipped: XcmPallet QueryCounter (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet Queries (r:0 w:1) - /// Proof Skipped: XcmPallet Queries (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) + /// Proof: `XcmPallet::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::QueryCounter` (r:1 w:1) + /// Proof: `XcmPallet::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: 
`Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::Queries` (r:0 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_subscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `169` - // Estimated: `3634` - // Minimum execution time: 38_436_000 picoseconds. - Weight::from_parts(39_300_000, 0) - .saturating_add(Weight::from_parts(0, 3634)) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(7)) + // Measured: `109` + // Estimated: `3574` + // Minimum execution time: 31_318_000 picoseconds. + Weight::from_parts(32_413_000, 0) + .saturating_add(Weight::from_parts(0, 3574)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: XcmPallet VersionNotifiers (r:1 w:1) - /// Proof Skipped: XcmPallet VersionNotifiers (max_values: None, max_size: None, mode: Measured) - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet Queries (r:0 w:1) - /// Proof Skipped: XcmPallet Queries (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) + /// Proof: `XcmPallet::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::Queries` (r:0 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `361` - // Estimated: `3826` - // Minimum execution time: 41_600_000 picoseconds. - Weight::from_parts(42_703_000, 0) - .saturating_add(Weight::from_parts(0, 3826)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(6)) + // Measured: `289` + // Estimated: `3754` + // Minimum execution time: 35_282_000 picoseconds. 
+ Weight::from_parts(35_969_000, 0) + .saturating_add(Weight::from_parts(0, 3754)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: XcmPallet XcmExecutionSuspended (r:0 w:1) - /// Proof Skipped: XcmPallet XcmExecutionSuspended (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `XcmPallet::XcmExecutionSuspended` (r:0 w:1) + /// Proof: `XcmPallet::XcmExecutionSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn force_suspension() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_792_000 picoseconds. - Weight::from_parts(2_958_000, 0) + // Minimum execution time: 2_247_000 picoseconds. + Weight::from_parts(2_381_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: XcmPallet SupportedVersion (r:4 w:2) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::SupportedVersion` (r:4 w:2) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `229` - // Estimated: `11119` - // Minimum execution time: 17_640_000 picoseconds. - Weight::from_parts(18_011_000, 0) - .saturating_add(Weight::from_parts(0, 11119)) + // Measured: `26` + // Estimated: `10916` + // Minimum execution time: 14_512_000 picoseconds. + Weight::from_parts(15_042_000, 0) + .saturating_add(Weight::from_parts(0, 10916)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: XcmPallet VersionNotifiers (r:4 w:2) - /// Proof Skipped: XcmPallet VersionNotifiers (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifiers` (r:4 w:2) + /// Proof: `XcmPallet::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `233` - // Estimated: `11123` - // Minimum execution time: 17_325_000 picoseconds. - Weight::from_parts(17_896_000, 0) - .saturating_add(Weight::from_parts(0, 11123)) + // Measured: `30` + // Estimated: `10920` + // Minimum execution time: 14_659_000 picoseconds. + Weight::from_parts(15_164_000, 0) + .saturating_add(Weight::from_parts(0, 10920)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: XcmPallet VersionNotifyTargets (r:5 w:0) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:0) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `13608` - // Minimum execution time: 19_295_000 picoseconds. - Weight::from_parts(19_840_000, 0) - .saturating_add(Weight::from_parts(0, 13608)) + // Measured: `40` + // Estimated: `13405` + // Minimum execution time: 16_261_000 picoseconds. 
+ Weight::from_parts(16_986_000, 0) + .saturating_add(Weight::from_parts(0, 13405)) .saturating_add(T::DbWeight::get().reads(5)) } - /// Storage: XcmPallet VersionNotifyTargets (r:2 w:1) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:2 w:1) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_current_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `239` - // Estimated: `6179` - // Minimum execution time: 35_819_000 picoseconds. - Weight::from_parts(36_708_000, 0) - .saturating_add(Weight::from_parts(0, 6179)) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(5)) + // Measured: `145` + // Estimated: `6085` + // Minimum execution time: 30_539_000 picoseconds. + Weight::from_parts(31_117_000, 0) + .saturating_add(Weight::from_parts(0, 6085)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: XcmPallet VersionNotifyTargets (r:3 w:0) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:3 w:0) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `272` - // Estimated: `8687` - // Minimum execution time: 9_572_000 picoseconds. - Weight::from_parts(9_907_000, 0) - .saturating_add(Weight::from_parts(0, 8687)) + // Measured: `69` + // Estimated: `8484` + // Minimum execution time: 9_463_000 picoseconds. 
+ Weight::from_parts(9_728_000, 0) + .saturating_add(Weight::from_parts(0, 8484)) .saturating_add(T::DbWeight::get().reads(3)) } - /// Storage: XcmPallet VersionNotifyTargets (r:4 w:2) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:4 w:2) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `240` - // Estimated: `11130` - // Minimum execution time: 17_376_000 picoseconds. - Weight::from_parts(17_870_000, 0) - .saturating_add(Weight::from_parts(0, 11130)) + // Measured: `37` + // Estimated: `10927` + // Minimum execution time: 15_169_000 picoseconds. + Weight::from_parts(15_694_000, 0) + .saturating_add(Weight::from_parts(0, 10927)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: XcmPallet VersionNotifyTargets (r:4 w:2) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:4 w:2) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `11133` - // Minimum execution time: 43_468_000 picoseconds. - Weight::from_parts(44_327_000, 0) - .saturating_add(Weight::from_parts(0, 11133)) - .saturating_add(T::DbWeight::get().reads(11)) - .saturating_add(T::DbWeight::get().writes(6)) + // Measured: `149` + // Estimated: `11039` + // Minimum execution time: 37_549_000 picoseconds. 
+ Weight::from_parts(38_203_000, 0) + .saturating_add(Weight::from_parts(0, 11039)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `XcmPallet::QueryCounter` (r:1 w:1) + /// Proof: `XcmPallet::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::Queries` (r:0 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `1485` + // Minimum execution time: 2_947_000 picoseconds. + Weight::from_parts(3_117_000, 0) + .saturating_add(Weight::from_parts(0, 1485)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `XcmPallet::Queries` (r:1 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7576` + // Estimated: `11041` + // Minimum execution time: 24_595_000 picoseconds. + Weight::from_parts(24_907_000, 0) + .saturating_add(Weight::from_parts(0, 11041)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) } } diff --git a/polkadot/xcm/pallet-xcm/Cargo.toml b/polkadot/xcm/pallet-xcm/Cargo.toml index da472fbe6db..6b5d5e75de8 100644 --- a/polkadot/xcm/pallet-xcm/Cargo.toml +++ b/polkadot/xcm/pallet-xcm/Cargo.toml @@ -23,12 +23,12 @@ sp-std = { path = "../../../substrate/primitives/std", default-features = false} xcm = { package = "staging-xcm", path = "..", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../xcm-executor", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../xcm-builder", default-features = false } [dev-dependencies] pallet-balances = { path = "../../../substrate/frame/balances" } polkadot-runtime-parachains = { path = "../../runtime/parachains" } polkadot-parachain-primitives = { path = "../../parachain" } -xcm-builder = { package = "staging-xcm-builder", path = "../xcm-builder" } [features] default = [ "std" ] @@ -45,6 +45,7 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-std/std", + "xcm-builder/std", "xcm-executor/std", "xcm/std", ] diff --git a/polkadot/xcm/pallet-xcm/src/benchmarking.rs b/polkadot/xcm/pallet-xcm/src/benchmarking.rs index aca4bd1fb3f..3eecbfec518 100644 --- a/polkadot/xcm/pallet-xcm/src/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm/src/benchmarking.rs @@ -190,6 +190,33 @@ benchmarks! { Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); } + new_query { + let responder = MultiLocation::from(Parent); + let timeout = 1u32.into(); + let match_querier = MultiLocation::from(Here); + }: { + Pallet::::new_query(responder, timeout, match_querier); + } + + take_response { + let responder = MultiLocation::from(Parent); + let timeout = 1u32.into(); + let match_querier = MultiLocation::from(Here); + let query_id = Pallet::::new_query(responder, timeout, match_querier); + let infos = (0 .. 
xcm::v3::MaxPalletsInfo::get()).map(|_| PalletInfo::new( + u32::MAX, + (0..xcm::v3::MaxPalletNameLen::get()).map(|_| 97u8).collect::>().try_into().unwrap(), + (0..xcm::v3::MaxPalletNameLen::get()).map(|_| 97u8).collect::>().try_into().unwrap(), + u32::MAX, + u32::MAX, + u32::MAX, + ).unwrap()).collect::>(); + Pallet::::expect_response(query_id, Response::PalletsInfo(infos.try_into().unwrap())); + + }: { + as QueryHandler>::take_response(query_id); + } + impl_benchmark_test_suite!( Pallet, crate::mock::new_test_ext_with_balances(Vec::new()), diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index 321bb294b88..2d969fb870c 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -28,9 +28,17 @@ mod tests; pub mod migration; use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; -use frame_support::traits::{ - Contains, ContainsPair, Currency, Defensive, EnsureOrigin, Get, LockableCurrency, OriginTrait, +use frame_support::{ + dispatch::GetDispatchInfo, + pallet_prelude::*, + traits::{ + Contains, ContainsPair, Currency, Defensive, EnsureOrigin, Get, LockableCurrency, + OriginTrait, WithdrawReasons, + }, + PalletId, }; +use frame_system::pallet_prelude::{BlockNumberFor, *}; +pub use pallet::*; use scale_info::TypeInfo; use sp_runtime::{ traits::{ @@ -41,17 +49,15 @@ use sp_runtime::{ }; use sp_std::{boxed::Box, marker::PhantomData, prelude::*, result::Result, vec}; use xcm::{latest::QueryResponseInfo, prelude::*}; -use xcm_executor::traits::{ConvertOrigin, Properties}; - -use frame_support::{ - dispatch::GetDispatchInfo, pallet_prelude::*, traits::WithdrawReasons, PalletId, +use xcm_builder::{ + ExecuteController, ExecuteControllerWeightInfo, QueryController, QueryControllerWeightInfo, + SendController, SendControllerWeightInfo, }; -use frame_system::pallet_prelude::*; -pub use pallet::*; use xcm_executor::{ traits::{ - CheckSuspension, ClaimAssets, ConvertLocation, DropAssets, MatchesFungible, OnResponse, - QueryHandler, QueryResponseStatus, VersionChangeNotifier, WeightBounds, + CheckSuspension, ClaimAssets, ConvertLocation, ConvertOrigin, DropAssets, MatchesFungible, + OnResponse, Properties, QueryHandler, QueryResponseStatus, VersionChangeNotifier, + WeightBounds, }, Assets, }; @@ -73,6 +79,8 @@ pub trait WeightInfo { fn notify_target_migration_fail() -> Weight; fn migrate_version_notify_targets() -> Weight; fn migrate_and_notify_old_targets() -> Weight; + fn new_query() -> Weight; + fn take_response() -> Weight; } /// fallback implementation @@ -141,6 +149,14 @@ impl WeightInfo for TestWeightInfo { fn migrate_and_notify_old_targets() -> Weight { Weight::from_parts(100_000_000, 0) } + + fn new_query() -> Weight { + Weight::from_parts(100_000_000, 0) + } + + fn take_response() -> Weight { + Weight::from_parts(100_000_000, 0) + } } #[frame_support::pallet] @@ -267,6 +283,93 @@ pub mod pallet { type ReachableDest: Get>; } + impl ExecuteControllerWeightInfo for Pallet { + fn execute() -> Weight { + T::WeightInfo::execute() + } + } + + impl ExecuteController, ::RuntimeCall> for Pallet { + type WeightInfo = Self; + fn execute( + origin: OriginFor, + message: Box::RuntimeCall>>, + max_weight: Weight, + ) -> Result { + let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; + let hash = message.using_encoded(sp_io::hashing::blake2_256); + let message = (*message).try_into().map_err(|()| Error::::BadVersion)?; + let value = (origin_location, message); + ensure!(T::XcmExecuteFilter::contains(&value), 
Error::::Filtered); + let (origin_location, message) = value; + let outcome = T::XcmExecutor::execute_xcm_in_credit( + origin_location, + message, + hash, + max_weight, + max_weight, + ); + Self::deposit_event(Event::Attempted { outcome: outcome.clone() }); + Ok(outcome) + } + } + + impl SendControllerWeightInfo for Pallet { + fn send() -> Weight { + T::WeightInfo::send() + } + } + + impl SendController> for Pallet { + type WeightInfo = Self; + fn send( + origin: OriginFor, + dest: Box, + message: Box>, + ) -> Result { + let origin_location = T::SendXcmOrigin::ensure_origin(origin)?; + let interior: Junctions = + origin_location.try_into().map_err(|_| Error::::InvalidOrigin)?; + let dest = MultiLocation::try_from(*dest).map_err(|()| Error::::BadVersion)?; + let message: Xcm<()> = (*message).try_into().map_err(|()| Error::::BadVersion)?; + + let message_id = + Self::send_xcm(interior, dest, message.clone()).map_err(Error::::from)?; + let e = Event::Sent { origin: origin_location, destination: dest, message, message_id }; + Self::deposit_event(e); + Ok(message_id) + } + } + + impl QueryControllerWeightInfo for Pallet { + fn query() -> Weight { + T::WeightInfo::new_query() + } + fn take_response() -> Weight { + T::WeightInfo::take_response() + } + } + + impl QueryController, BlockNumberFor> for Pallet { + type WeightInfo = Self; + + fn query( + origin: OriginFor, + timeout: BlockNumberFor, + match_querier: VersionedMultiLocation, + ) -> Result { + let responder = ::ExecuteXcmOrigin::ensure_origin(origin)?; + let query_id = ::new_query( + responder, + timeout, + MultiLocation::try_from(match_querier) + .map_err(|_| Into::::into(Error::::BadVersion))?, + ); + + Ok(query_id) + } + } + #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { @@ -771,16 +874,7 @@ pub mod pallet { dest: Box, message: Box>, ) -> DispatchResult { - let origin_location = T::SendXcmOrigin::ensure_origin(origin)?; - let interior: Junctions = - origin_location.try_into().map_err(|_| Error::::InvalidOrigin)?; - let dest = MultiLocation::try_from(*dest).map_err(|()| Error::::BadVersion)?; - let message: Xcm<()> = (*message).try_into().map_err(|()| Error::::BadVersion)?; - - let message_id = - Self::send_xcm(interior, dest, message.clone()).map_err(Error::::from)?; - let e = Event::Sent { origin: origin_location, destination: dest, message, message_id }; - Self::deposit_event(e); + >::send(origin, dest, message)?; Ok(()) } @@ -896,7 +990,7 @@ pub mod pallet { /// execution attempt will be made. /// /// NOTE: A successful return to this does *not* imply that the `msg` was executed - /// successfully to completion; only that *some* of it was executed. + /// successfully to completion; only that it was attempted. 
#[pallet::call_index(3)] #[pallet::weight(max_weight.saturating_add(T::WeightInfo::execute()))] pub fn execute( @@ -904,23 +998,8 @@ pub mod pallet { message: Box::RuntimeCall>>, max_weight: Weight, ) -> DispatchResultWithPostInfo { - let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; - let hash = message.using_encoded(sp_io::hashing::blake2_256); - let message = (*message).try_into().map_err(|()| Error::::BadVersion)?; - let value = (origin_location, message); - ensure!(T::XcmExecuteFilter::contains(&value), Error::::Filtered); - let (origin_location, message) = value; - let outcome = T::XcmExecutor::execute_xcm_in_credit( - origin_location, - message, - hash, - max_weight, - max_weight, - ); - let result = - Ok(Some(outcome.weight_used().saturating_add(T::WeightInfo::execute())).into()); - Self::deposit_event(Event::Attempted { outcome }); - result + let outcome = >::execute(origin, message, max_weight)?; + Ok(Some(outcome.weight_used().saturating_add(T::WeightInfo::execute())).into()) } /// Extoll that a particular destination can be communicated with through a particular @@ -1145,7 +1224,7 @@ impl QueryHandler for Pallet { timeout: BlockNumberFor, match_querier: impl Into, ) -> Self::QueryId { - Self::do_new_query(responder, None, timeout, match_querier).into() + Self::do_new_query(responder, None, timeout, match_querier) } /// To check the status of the query, use `fn query()` passing the resultant `QueryId` diff --git a/polkadot/xcm/xcm-builder/src/controller.rs b/polkadot/xcm/xcm-builder/src/controller.rs new file mode 100644 index 00000000000..0ee638b73e1 --- /dev/null +++ b/polkadot/xcm/xcm-builder/src/controller.rs @@ -0,0 +1,187 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! A set of traits that define how a pallet interface with XCM. +//! Controller traits defined in this module are high-level traits that will rely on other traits +//! from `xcm-executor` to perform their tasks. + +use frame_support::pallet_prelude::DispatchError; +use sp_std::boxed::Box; +use xcm::prelude::*; +use xcm_executor::traits::QueryHandler; + +/// Umbrella trait for all Controller traits. +pub trait Controller: + ExecuteController + SendController + QueryController +{ +} + +impl Controller for T where + T: ExecuteController + + SendController + + QueryController +{ +} + +/// Weight functions needed for [`ExecuteController`]. +pub trait ExecuteControllerWeightInfo { + /// Weight for [`ExecuteController::execute`] + fn execute() -> Weight; +} + +/// Execute an XCM locally, for a given origin. +/// +/// An implementation of that trait will handle the low-level details of the execution, such as: +/// - Validating and Converting the origin to a MultiLocation. +/// - Handling versioning. +/// - Calling the internal executor, which implements [`ExecuteXcm`]. +pub trait ExecuteController { + /// Weight information for ExecuteController functions. 
+ type WeightInfo: ExecuteControllerWeightInfo; + + /// Attempt to execute an XCM locally, and return the outcome. + /// + /// # Parameters + /// + /// - `origin`: the origin of the call. + /// - `message`: the XCM program to be executed. + /// - `max_weight`: the maximum weight that can be consumed by the execution. + fn execute( + origin: Origin, + message: Box>, + max_weight: Weight, + ) -> Result; +} + +/// Weight functions needed for [`SendController`]. +pub trait SendControllerWeightInfo { + /// Weight for [`SendController::send`] + fn send() -> Weight; +} + +/// Send an XCM from a given origin. +/// +/// An implementation of that trait will handle the low-level details of dispatching an XCM, such +/// as: +/// - Validating and Converting the origin to an interior location. +/// - Handling versioning. +/// - Calling the internal router, which implements [`SendXcm`]. +pub trait SendController { + /// Weight information for SendController functions. + type WeightInfo: SendControllerWeightInfo; + + /// Send an XCM to be executed by a remote location. + /// + /// # Parameters + /// + /// - `origin`: the origin of the call. + /// - `dest`: the destination of the message. + /// - `msg`: the XCM to be sent. + fn send( + origin: Origin, + dest: Box, + message: Box>, + ) -> Result; +} + +/// Weight functions needed for [`QueryController`]. +pub trait QueryControllerWeightInfo { + /// Weight for [`QueryController::query`] + fn query() -> Weight; + + /// Weight for [`QueryHandler::take_response`] + fn take_response() -> Weight; +} + +/// Query a remote location, from a given origin. +/// +/// An implementation of that trait will handle the low-level details of querying a remote location, +/// such as: +/// - Validating and Converting the origin to an interior location. +/// - Handling versioning. +/// - Calling the [`QueryHandler`] to register the query. +pub trait QueryController: QueryHandler { + /// Weight information for QueryController functions. + type WeightInfo: QueryControllerWeightInfo; + + /// Query a remote location. + /// + /// # Parameters + /// + /// - `origin`: the origin of the call, used to determine the responder. + /// - `timeout`: the maximum block number that the query should be responded to. + /// - `match_querier`: the querier that the query should be responded to. 
+ fn query( + origin: Origin, + timeout: Timeout, + match_querier: VersionedMultiLocation, + ) -> Result; +} + +impl ExecuteController for () { + type WeightInfo = (); + fn execute( + _origin: Origin, + _message: Box>, + _max_weight: Weight, + ) -> Result { + Ok(Outcome::Error(XcmError::Unimplemented)) + } +} + +impl ExecuteControllerWeightInfo for () { + fn execute() -> Weight { + Weight::zero() + } +} + +impl SendController for () { + type WeightInfo = (); + fn send( + _origin: Origin, + _dest: Box, + _message: Box>, + ) -> Result { + Ok(Default::default()) + } +} + +impl SendControllerWeightInfo for () { + fn send() -> Weight { + Weight::zero() + } +} + +impl QueryControllerWeightInfo for () { + fn query() -> Weight { + Weight::zero() + } + fn take_response() -> Weight { + Weight::zero() + } +} + +impl QueryController for () { + type WeightInfo = (); + + fn query( + _origin: Origin, + _timeout: Timeout, + _match_querier: VersionedMultiLocation, + ) -> Result { + Ok(Default::default()) + } +} diff --git a/polkadot/xcm/xcm-builder/src/lib.rs b/polkadot/xcm/xcm-builder/src/lib.rs index 0a74b3f579a..35f95b85c89 100644 --- a/polkadot/xcm/xcm-builder/src/lib.rs +++ b/polkadot/xcm/xcm-builder/src/lib.rs @@ -115,3 +115,9 @@ pub use origin_aliases::AliasForeignAccountId32; mod pay; pub use pay::{FixedLocation, LocatableAssetId, PayAccountId32OnChainOverXcm, PayOverXcm}; + +mod controller; +pub use controller::{ + Controller, ExecuteController, ExecuteControllerWeightInfo, QueryController, + QueryControllerWeightInfo, SendController, SendControllerWeightInfo, +}; diff --git a/polkadot/xcm/xcm-builder/src/tests/mock.rs b/polkadot/xcm/xcm-builder/src/tests/mock.rs index 543b00e0118..189274eb5f5 100644 --- a/polkadot/xcm/xcm-builder/src/tests/mock.rs +++ b/polkadot/xcm/xcm-builder/src/tests/mock.rs @@ -414,7 +414,7 @@ pub fn response(query_id: u64) -> Option { /// Mock implementation of the [`QueryHandler`] trait for creating XCM success queries and expecting /// responses. pub struct TestQueryHandler(core::marker::PhantomData<(T, BlockNumber)>); -impl QueryHandler +impl QueryHandler for TestQueryHandler { type QueryId = u64; diff --git a/polkadot/xcm/xcm-executor/src/traits/on_response.rs b/polkadot/xcm/xcm-executor/src/traits/on_response.rs index 3558160dc87..ea41f242a97 100644 --- a/polkadot/xcm/xcm-executor/src/traits/on_response.rs +++ b/polkadot/xcm/xcm-executor/src/traits/on_response.rs @@ -14,10 +14,13 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use crate::Xcm; +use crate::{Junctions::Here, Xcm}; use core::result; -use frame_support::pallet_prelude::{Get, TypeInfo}; -use parity_scale_codec::{FullCodec, MaxEncodedLen}; +use frame_support::{ + pallet_prelude::{Get, TypeInfo}, + parameter_types, +}; +use parity_scale_codec::{Decode, Encode, FullCodec, MaxEncodedLen}; use sp_arithmetic::traits::Zero; use sp_std::fmt::Debug; use xcm::latest::{ @@ -103,7 +106,7 @@ impl VersionChangeNotifier for () { } /// The possible state of an XCM query response. -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Encode, Decode)] pub enum QueryResponseStatus { /// The response has arrived, and includes the inner Response and the block number it arrived /// at. 
@@ -129,7 +132,7 @@ pub trait QueryHandler { + PartialEq + Debug + Copy; - type BlockNumber: Zero; + type BlockNumber: Zero + Encode; type Error; type UniversalLocation: Get; @@ -165,3 +168,36 @@ pub trait QueryHandler { #[cfg(feature = "runtime-benchmarks")] fn expect_response(id: Self::QueryId, response: Response); } + +parameter_types! { + pub UniversalLocation: InteriorMultiLocation = Here; +} + +impl QueryHandler for () { + type BlockNumber = u64; + type Error = (); + type QueryId = u64; + type UniversalLocation = UniversalLocation; + + fn take_response(_query_id: Self::QueryId) -> QueryResponseStatus { + QueryResponseStatus::NotFound + } + fn new_query( + _responder: impl Into, + _timeout: Self::BlockNumber, + _match_querier: impl Into, + ) -> Self::QueryId { + 0u64 + } + + fn report_outcome( + _message: &mut Xcm<()>, + _responder: impl Into, + _timeout: Self::BlockNumber, + ) -> Result { + Err(()) + } + + #[cfg(feature = "runtime-benchmarks")] + fn expect_response(_id: Self::QueryId, _response: crate::Response) {} +} diff --git a/prdoc/pr_2086.prdoc b/prdoc/pr_2086.prdoc new file mode 100644 index 00000000000..a9bbd0729d5 --- /dev/null +++ b/prdoc/pr_2086.prdoc @@ -0,0 +1,15 @@ +title: "Contracts: Add XCM traits to interface with contracts" + +doc: + - audience: Core Dev + description: | + We are introducing a new set of `XcmController` traits in `pallet-xcm`. + These traits extract functionality from `pallet-xcm` and provide high-level interaction with XCM. + They enable other pallets, like `pallet_contracts`, to rely on these traits instead of tight coupling to `pallet-xcm` itself. + +crates: + - name: "pallet-xcm" + semver: patch + - name: "xcm-executor" + semver: patch + -- GitLab From 0c5dcca9e3cef6b2f456fccefd9f6c5e43444053 Mon Sep 17 00:00:00 2001 From: Daniel Olano Date: Sat, 11 Nov 2023 20:34:08 +0100 Subject: [PATCH 04/74] Add `s` utility function to frame support (#2275) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A utility function I consider quite useful to declare string literals that are backed by an array. --------- Co-authored-by: Bastian Köcher Co-authored-by: Davide Galassi --- substrate/primitives/runtime/src/lib.rs | 26 +++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/substrate/primitives/runtime/src/lib.rs b/substrate/primitives/runtime/src/lib.rs index 0e1d4c31fd7..ddf92554c83 100644 --- a/substrate/primitives/runtime/src/lib.rs +++ b/substrate/primitives/runtime/src/lib.rs @@ -954,6 +954,32 @@ pub fn print(print: impl traits::Printable) { print.print(); } +/// Utility function to declare string literals backed by an array of length N. +/// +/// The input can be shorter than N, in that case the end of the array is padded with zeros. +/// +/// [`str_array`] is useful when converting strings that end up in the storage as fixed size arrays +/// or in const contexts where static data types have strings that could also end up in the storage. +/// +/// # Example +/// +/// ```rust +/// # use sp_runtime::str_array; +/// const MY_STR: [u8; 6] = str_array("data"); +/// assert_eq!(MY_STR, *b"data\0\0"); +/// ``` +pub const fn str_array(s: &str) -> [u8; N] { + debug_assert!(s.len() <= N, "String literal doesn't fit in array"); + let mut i = 0; + let mut arr = [0; N]; + let s = s.as_bytes(); + while i < s.len() { + arr[i] = s[i]; + i += 1; + } + arr +} + /// Describes on what should happen with a storage transaction. pub enum TransactionOutcome { /// Commit the transaction. 
-- GitLab From 951bcceba085f1d3f5d022b1c211e5150fe8d2b2 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Mon, 13 Nov 2023 07:33:37 +0200 Subject: [PATCH 05/74] Unify `ChainSync` actions under one enum (#2180) All `ChainSync` actions that `SyncingEngine` should perform are unified under one `ChainSyncAction`. Processing of these actions put into a single place after `select!` in `SyncingEngine::run` instead of multiple places where calling `ChainSync` methods. --- .../client/network/sync/src/chain_sync.rs | 277 +++++++----------- .../network/sync/src/chain_sync/test.rs | 202 ++++++++----- substrate/client/network/sync/src/engine.rs | 163 +++++------ 3 files changed, 314 insertions(+), 328 deletions(-) diff --git a/substrate/client/network/sync/src/chain_sync.rs b/substrate/client/network/sync/src/chain_sync.rs index 858125f93f1..2adc6d42341 100644 --- a/substrate/client/network/sync/src/chain_sync.rs +++ b/substrate/client/network/sync/src/chain_sync.rs @@ -184,90 +184,26 @@ struct GapSync { target: NumberFor, } -/// Action that the parent of [`ChainSync`] should perform after reporting imported blocks with -/// [`ChainSync::on_blocks_processed`]. -pub enum BlockRequestAction { +/// Action that the parent of [`ChainSync`] should perform after reporting a network or block event. +#[derive(Debug)] +pub enum ChainSyncAction { /// Send block request to peer. Always implies dropping a stale block request to the same peer. - SendRequest { peer_id: PeerId, request: BlockRequest }, + SendBlockRequest { peer_id: PeerId, request: BlockRequest }, /// Drop stale block request. - RemoveStale { peer_id: PeerId }, -} - -/// Action that the parent of [`ChainSync`] should perform if we want to import blocks. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct ImportBlocksAction { - pub origin: BlockOrigin, - pub blocks: Vec>, -} - -/// Action that the parent of [`ChainSync`] should perform if we want to import justifications. -pub struct ImportJustificationsAction { - pub peer_id: PeerId, - pub hash: B::Hash, - pub number: NumberFor, - pub justifications: Justifications, -} - -/// Result of [`ChainSync::on_block_data`]. -#[derive(Debug, Clone, PartialEq, Eq)] -enum OnBlockData { - /// The block should be imported. - Import(ImportBlocksAction), - /// A new block request needs to be made to the given peer. - Request(PeerId, BlockRequest), - /// Continue processing events. - Continue, -} - -/// Result of [`ChainSync::on_block_justification`]. -#[derive(Debug, Clone, PartialEq, Eq)] -enum OnBlockJustification { - /// The justification needs no further handling. - Nothing, - /// The justification should be imported. - Import { + CancelBlockRequest { peer_id: PeerId }, + /// Peer misbehaved. Disconnect, report it and cancel the block request to it. + DropPeer(BadPeer), + /// Import blocks. + ImportBlocks { origin: BlockOrigin, blocks: Vec> }, + /// Import justifications. + ImportJustifications { peer_id: PeerId, - hash: Block::Hash, - number: NumberFor, + hash: B::Hash, + number: NumberFor, justifications: Justifications, }, } -// Result of [`ChainSync::on_state_data`]. -#[derive(Debug)] -enum OnStateData { - /// The block and state that should be imported. - Import(BlockOrigin, IncomingBlock), - /// A new state request needs to be made to the given peer. - Continue, -} - -/// Action that the parent of [`ChainSync`] should perform after reporting block response with -/// [`ChainSync::on_block_response`]. -pub enum OnBlockResponse { - /// Nothing to do. - Nothing, - /// Perform block request. 
- SendBlockRequest { peer_id: PeerId, request: BlockRequest }, - /// Import blocks. - ImportBlocks(ImportBlocksAction), - /// Import justifications. - ImportJustifications(ImportJustificationsAction), - /// Invalid block response, the peer should be disconnected and reported. - DisconnectPeer(BadPeer), -} - -/// Action that the parent of [`ChainSync`] should perform after reporting state response with -/// [`ChainSync::on_state_response`]. -pub enum OnStateResponse { - /// Nothing to do. - Nothing, - /// Import blocks. - ImportBlocks(ImportBlocksAction), - /// Invalid state response, the peer should be disconnected and reported. - DisconnectPeer(BadPeer), -} - /// The main data structure which contains all the state for a chains /// active syncing strategy. pub struct ChainSync { @@ -313,6 +249,8 @@ pub struct ChainSync { import_existing: bool, /// Gap download process. gap_sync: Option>, + /// Pending actions. + actions: Vec>, } /// All the data we have about a Peer that we are trying to sync with @@ -427,6 +365,7 @@ where gap_sync: None, warp_sync_config, warp_sync_target_block_header: None, + actions: Vec::new(), }; sync.reset_sync_start_point()?; @@ -509,8 +448,17 @@ where } /// Notify syncing state machine that a new sync peer has connected. + pub fn new_peer(&mut self, peer_id: PeerId, best_hash: B::Hash, best_number: NumberFor) { + match self.new_peer_inner(peer_id, best_hash, best_number) { + Ok(Some(request)) => + self.actions.push(ChainSyncAction::SendBlockRequest { peer_id, request }), + Ok(None) => {}, + Err(bad_peer) => self.actions.push(ChainSyncAction::DropPeer(bad_peer)), + } + } + #[must_use] - pub fn new_peer( + fn new_peer_inner( &mut self, peer_id: PeerId, best_hash: B::Hash, @@ -727,7 +675,7 @@ where peer_id: &PeerId, request: Option>, response: BlockResponse, - ) -> Result, BadPeer> { + ) -> Result<(), BadPeer> { self.downloaded_blocks += response.blocks.len(); let mut gap = false; let new_blocks: Vec> = if let Some(peer) = self.peers.get_mut(peer_id) { @@ -892,10 +840,12 @@ where start: *start, state: next_state, }; - return Ok(OnBlockData::Request( - *peer_id, - ancestry_request::(next_num), - )) + let request = ancestry_request::(next_num); + self.actions.push(ChainSyncAction::SendBlockRequest { + peer_id: *peer_id, + request, + }); + return Ok(()) } else { // Ancestry search is complete. Check if peer is on a stale fork unknown // to us and add it to sync targets if necessary. @@ -929,7 +879,7 @@ where .insert(*peer_id); } peer.state = PeerSyncState::Available; - Vec::new() + return Ok(()) } }, PeerSyncState::DownloadingWarpTargetBlock => { @@ -940,8 +890,7 @@ where match warp_sync.import_target_block( blocks.pop().expect("`blocks` len checked above."), ) { - warp::TargetBlockImportResult::Success => - return Ok(OnBlockData::Continue), + warp::TargetBlockImportResult::Success => return Ok(()), warp::TargetBlockImportResult::BadResponse => return Err(BadPeer(*peer_id, rep::VERIFICATION_FAIL)), } @@ -963,7 +912,7 @@ where "Logic error: we think we are downloading warp target block from {}, but no warp sync is happening.", peer_id, ); - return Ok(OnBlockData::Continue) + return Ok(()) } }, PeerSyncState::Available | @@ -1000,7 +949,9 @@ where return Err(BadPeer(*peer_id, rep::NOT_REQUESTED)) }; - Ok(OnBlockData::Import(self.validate_and_queue_blocks(new_blocks, gap))) + self.validate_and_queue_blocks(new_blocks, gap); + + Ok(()) } /// Submit a justification response for processing. 
@@ -1009,7 +960,7 @@ where &mut self, peer_id: PeerId, response: BlockResponse, - ) -> Result, BadPeer> { + ) -> Result<(), BadPeer> { let peer = if let Some(peer) = self.peers.get_mut(&peer_id) { peer } else { @@ -1017,7 +968,7 @@ where target: LOG_TARGET, "💔 Called on_block_justification with a peer ID of an unknown peer", ); - return Ok(OnBlockJustification::Nothing) + return Ok(()) }; self.allowed_requests.add(&peer_id); @@ -1054,11 +1005,17 @@ where if let Some((peer_id, hash, number, justifications)) = self.extra_justifications.on_response(peer_id, justification) { - return Ok(OnBlockJustification::Import { peer_id, hash, number, justifications }) + self.actions.push(ChainSyncAction::ImportJustifications { + peer_id, + hash, + number, + justifications, + }); + return Ok(()) } } - Ok(OnBlockJustification::Nothing) + Ok(()) } /// Report a justification import (successful or not). @@ -1196,8 +1153,7 @@ where } /// Notify that a sync peer has disconnected. - #[must_use] - pub fn peer_disconnected(&mut self, peer_id: &PeerId) -> Option> { + pub fn peer_disconnected(&mut self, peer_id: &PeerId) { self.blocks.clear_peer_download(peer_id); if let Some(gap_sync) = &mut self.gap_sync { gap_sync.blocks.clear_peer_download(peer_id) @@ -1212,7 +1168,9 @@ where let blocks = self.ready_blocks(); - (!blocks.is_empty()).then(|| self.validate_and_queue_blocks(blocks, false)) + if !blocks.is_empty() { + self.validate_and_queue_blocks(blocks, false); + } } /// Get prometheus metrics. @@ -1259,11 +1217,7 @@ where } } - fn validate_and_queue_blocks( - &mut self, - mut new_blocks: Vec>, - gap: bool, - ) -> ImportBlocksAction { + fn validate_and_queue_blocks(&mut self, mut new_blocks: Vec>, gap: bool) { let orig_len = new_blocks.len(); new_blocks.retain(|b| !self.queue_blocks.contains(&b.hash)); if new_blocks.len() != orig_len { @@ -1295,7 +1249,7 @@ where } self.queue_blocks.extend(new_blocks.iter().map(|b| b.hash)); - ImportBlocksAction { origin, blocks: new_blocks } + self.actions.push(ChainSyncAction::ImportBlocks { origin, blocks: new_blocks }) } fn update_peer_common_number(&mut self, peer_id: &PeerId, new_common: NumberFor) { @@ -1346,7 +1300,7 @@ where /// Restart the sync process. This will reset all pending block requests and return an iterator /// of new block requests to make to peers. Peers that were downloading finality data (i.e. /// their state was `DownloadingJustification`) are unaffected and will stay in the same state. - fn restart(&mut self) -> impl Iterator, BadPeer>> + '_ { + fn restart(&mut self) { self.blocks.clear(); if let Err(e) = self.reset_sync_start_point() { warn!(target: LOG_TARGET, "💔 Unable to restart sync: {e}"); @@ -1360,7 +1314,7 @@ where ); let old_peers = std::mem::take(&mut self.peers); - old_peers.into_iter().filter_map(move |(peer_id, mut p)| { + old_peers.into_iter().for_each(|(peer_id, mut p)| { // peers that were downloading justifications // should be kept in that state. if let PeerSyncState::DownloadingJustification(_) = p.state { @@ -1374,19 +1328,21 @@ where ); p.common_number = self.best_queued_number; self.peers.insert(peer_id, p); - return None + return } // handle peers that were in other states. 
- match self.new_peer(peer_id, p.best_hash, p.best_number) { + let action = match self.new_peer_inner(peer_id, p.best_hash, p.best_number) { // since the request is not a justification, remove it from pending responses - Ok(None) => Some(Ok(BlockRequestAction::RemoveStale { peer_id })), + Ok(None) => ChainSyncAction::CancelBlockRequest { peer_id }, // update the request if the new one is available - Ok(Some(request)) => Some(Ok(BlockRequestAction::SendRequest { peer_id, request })), + Ok(Some(request)) => ChainSyncAction::SendBlockRequest { peer_id, request }, // this implies that we need to drop pending response from the peer - Err(e) => Some(Err(e)), - } - }) + Err(bad_peer) => ChainSyncAction::DropPeer(bad_peer), + }; + + self.actions.push(action); + }); } /// Find a block to start sync from. If we sync with state, that's the latest block we have @@ -1534,13 +1490,12 @@ where } /// Submit blocks received in a response. - #[must_use] pub fn on_block_response( &mut self, peer_id: PeerId, request: BlockRequest, blocks: Vec>, - ) -> OnBlockResponse { + ) { let block_response = BlockResponse:: { id: request.id, blocks }; let blocks_range = || match ( @@ -1563,41 +1518,21 @@ where blocks_range(), ); - if request.fields == BlockAttributes::JUSTIFICATION { - match self.on_block_justification(peer_id, block_response) { - Ok(OnBlockJustification::Nothing) => OnBlockResponse::Nothing, - Ok(OnBlockJustification::Import { peer_id, hash, number, justifications }) => - OnBlockResponse::ImportJustifications(ImportJustificationsAction { - peer_id, - hash, - number, - justifications, - }), - Err(bad_peer) => OnBlockResponse::DisconnectPeer(bad_peer), - } + let res = if request.fields == BlockAttributes::JUSTIFICATION { + self.on_block_justification(peer_id, block_response) } else { - match self.on_block_data(&peer_id, Some(request), block_response) { - Ok(OnBlockData::Import(action)) => OnBlockResponse::ImportBlocks(action), - Ok(OnBlockData::Request(peer_id, request)) => - OnBlockResponse::SendBlockRequest { peer_id, request }, - Ok(OnBlockData::Continue) => OnBlockResponse::Nothing, - Err(bad_peer) => OnBlockResponse::DisconnectPeer(bad_peer), - } + self.on_block_data(&peer_id, Some(request), block_response) + }; + + if let Err(bad_peer) = res { + self.actions.push(ChainSyncAction::DropPeer(bad_peer)); } } /// Submit a state received in a response. - #[must_use] - pub fn on_state_response( - &mut self, - peer_id: PeerId, - response: OpaqueStateResponse, - ) -> OnStateResponse { - match self.on_state_data(&peer_id, response) { - Ok(OnStateData::Import(origin, block)) => - OnStateResponse::ImportBlocks(ImportBlocksAction { origin, blocks: vec![block] }), - Ok(OnStateData::Continue) => OnStateResponse::Nothing, - Err(bad_peer) => OnStateResponse::DisconnectPeer(bad_peer), + pub fn on_state_response(&mut self, peer_id: PeerId, response: OpaqueStateResponse) { + if let Err(bad_peer) = self.on_state_data(&peer_id, response) { + self.actions.push(ChainSyncAction::DropPeer(bad_peer)); } } @@ -1833,11 +1768,12 @@ where None } + #[must_use] fn on_state_data( &mut self, peer_id: &PeerId, response: OpaqueStateResponse, - ) -> Result, BadPeer> { + ) -> Result<(), BadPeer> { let response: Box = response.0.downcast().map_err(|_error| { error!( target: LOG_TARGET, @@ -1892,9 +1828,10 @@ where state: Some(state), }; debug!(target: LOG_TARGET, "State download is complete. 
Import is queued"); - Ok(OnStateData::Import(origin, block)) + self.actions.push(ChainSyncAction::ImportBlocks { origin, blocks: vec![block] }); + Ok(()) }, - ImportResult::Continue => Ok(OnStateData::Continue), + ImportResult::Continue => Ok(()), ImportResult::BadResponse => { debug!(target: LOG_TARGET, "Bad state data received from {peer_id}"); Err(BadPeer(*peer_id, rep::BAD_BLOCK)) @@ -1903,12 +1840,7 @@ where } /// Submit a warp proof response received. - #[must_use] - pub fn on_warp_sync_response( - &mut self, - peer_id: &PeerId, - response: EncodedProof, - ) -> Result<(), BadPeer> { + pub fn on_warp_sync_response(&mut self, peer_id: &PeerId, response: EncodedProof) { if let Some(peer) = self.peers.get_mut(peer_id) { if let PeerSyncState::DownloadingWarpProof = peer.state { peer.state = PeerSyncState::Available; @@ -1925,14 +1857,16 @@ where sync.import_warp_proof(response) } else { debug!(target: LOG_TARGET, "Ignored obsolete warp sync response from {peer_id}"); - return Err(BadPeer(*peer_id, rep::NOT_REQUESTED)) + self.actions + .push(ChainSyncAction::DropPeer(BadPeer(*peer_id, rep::NOT_REQUESTED))); + return }; match import_result { - WarpProofImportResult::Success => Ok(()), + WarpProofImportResult::Success => {}, WarpProofImportResult::BadResponse => { debug!(target: LOG_TARGET, "Bad proof data received from {peer_id}"); - Err(BadPeer(*peer_id, rep::BAD_BLOCK)) + self.actions.push(ChainSyncAction::DropPeer(BadPeer(*peer_id, rep::BAD_BLOCK))); }, } } @@ -1942,17 +1876,14 @@ where /// Call this when a batch of blocks have been processed by the import /// queue, with or without errors. If an error is returned, the pending response /// from the peer must be dropped. - #[must_use] pub fn on_blocks_processed( &mut self, imported: usize, count: usize, results: Vec<(Result>, BlockImportError>, B::Hash)>, - ) -> Box, BadPeer>>> { + ) { trace!(target: LOG_TARGET, "Imported {imported} of {count}"); - let mut output = Vec::new(); - let mut has_error = false; for (_, hash) in &results { self.queue_blocks.remove(hash); @@ -1993,7 +1924,10 @@ where if aux.bad_justification { if let Some(ref peer) = peer_id { warn!("💔 Sent block with bad justification to import"); - output.push(Err(BadPeer(*peer, rep::BAD_JUSTIFICATION))); + self.actions.push(ChainSyncAction::DropPeer(BadPeer( + *peer, + rep::BAD_JUSTIFICATION, + ))); } } @@ -2010,7 +1944,7 @@ where ); self.state_sync = None; self.mode = SyncMode::Full; - output.extend(self.restart()); + self.restart(); } let warp_sync_complete = self .warp_sync @@ -2024,7 +1958,7 @@ where ); self.warp_sync = None; self.mode = SyncMode::Full; - output.extend(self.restart()); + self.restart(); } let gap_sync_complete = self.gap_sync.as_ref().map_or(false, |s| s.target == number); @@ -2042,8 +1976,9 @@ where target: LOG_TARGET, "💔 Peer sent block with incomplete header to import", ); - output.push(Err(BadPeer(peer, rep::INCOMPLETE_HEADER))); - output.extend(self.restart()); + self.actions + .push(ChainSyncAction::DropPeer(BadPeer(peer, rep::INCOMPLETE_HEADER))); + self.restart(); }, Err(BlockImportError::VerificationFailed(peer_id, e)) => { let extra_message = peer_id @@ -2055,10 +1990,11 @@ where ); if let Some(peer) = peer_id { - output.push(Err(BadPeer(peer, rep::VERIFICATION_FAIL))); + self.actions + .push(ChainSyncAction::DropPeer(BadPeer(peer, rep::VERIFICATION_FAIL))); } - output.extend(self.restart()); + self.restart(); }, Err(BlockImportError::BadBlock(peer_id)) => if let Some(peer) = peer_id { @@ -2066,7 +2002,7 @@ where target: LOG_TARGET, "💔 Block 
{hash:?} received from peer {peer} has been blacklisted", ); - output.push(Err(BadPeer(peer, rep::BAD_BLOCK))); + self.actions.push(ChainSyncAction::DropPeer(BadPeer(peer, rep::BAD_BLOCK))); }, Err(BlockImportError::MissingState) => { // This may happen if the chain we were requesting upon has been discarded @@ -2078,14 +2014,19 @@ where warn!(target: LOG_TARGET, "💔 Error importing block {hash:?}: {}", e.unwrap_err()); self.state_sync = None; self.warp_sync = None; - output.extend(self.restart()); + self.restart(); }, Err(BlockImportError::Cancelled) => {}, }; } self.allowed_requests.set_all(); - Box::new(output.into_iter()) + } + + /// Get pending actions to perform. + #[must_use] + pub fn take_actions(&mut self) -> impl Iterator> { + std::mem::take(&mut self.actions).into_iter() } } diff --git a/substrate/client/network/sync/src/chain_sync/test.rs b/substrate/client/network/sync/src/chain_sync/test.rs index 2eefd2ad13e..15b2a95a07c 100644 --- a/substrate/client/network/sync/src/chain_sync/test.rs +++ b/substrate/client/network/sync/src/chain_sync/test.rs @@ -53,7 +53,7 @@ fn processes_empty_response_on_justification_request_for_unknown_block() { }; // add a new peer with the same best block - sync.new_peer(peer_id, a1_hash, a1_number).unwrap(); + sync.new_peer(peer_id, a1_hash, a1_number); // and request a justification for the block sync.request_justification(&a1_hash, a1_number); @@ -74,10 +74,8 @@ fn processes_empty_response_on_justification_request_for_unknown_block() { // if the peer replies with an empty response (i.e. it doesn't know the block), // the active request should be cleared. - assert_eq!( - sync.on_block_justification(peer_id, BlockResponse:: { id: 0, blocks: vec![] }), - Ok(OnBlockJustification::Nothing), - ); + sync.on_block_justification(peer_id, BlockResponse:: { id: 0, blocks: vec![] }) + .unwrap(); // there should be no in-flight requests assert_eq!(sync.extra_justifications.active_requests().count(), 0); @@ -119,8 +117,8 @@ fn restart_doesnt_affect_peers_downloading_finality_data() { let (b1_hash, b1_number) = new_blocks(50); // add 2 peers at blocks that we don't have locally - sync.new_peer(peer_id1, Hash::random(), 42).unwrap(); - sync.new_peer(peer_id2, Hash::random(), 10).unwrap(); + sync.new_peer(peer_id1, Hash::random(), 42); + sync.new_peer(peer_id2, Hash::random(), 10); // we wil send block requests to these peers // for these blocks we don't know about @@ -130,7 +128,7 @@ fn restart_doesnt_affect_peers_downloading_finality_data() { .all(|(p, _)| { p == peer_id1 || p == peer_id2 })); // add a new peer at a known block - sync.new_peer(peer_id3, b1_hash, b1_number).unwrap(); + sync.new_peer(peer_id3, b1_hash, b1_number); // we request a justification for a block we have locally sync.request_justification(&b1_hash, b1_number); @@ -148,14 +146,19 @@ fn restart_doesnt_affect_peers_downloading_finality_data() { PeerSyncState::DownloadingJustification(b1_hash), ); + // clear old actions + let _ = sync.take_actions(); + // we restart the sync state - let block_requests = sync.restart(); + sync.restart(); + let actions = sync.take_actions().collect::>(); // which should make us send out block requests to the first two peers - assert!(block_requests.map(|r| r.unwrap()).all(|event| match event { - BlockRequestAction::SendRequest { peer_id, .. } => - peer_id == peer_id1 || peer_id == peer_id2, - BlockRequestAction::RemoveStale { .. 
} => false, + assert_eq!(actions.len(), 2); + assert!(actions.iter().all(|action| match action { + ChainSyncAction::SendBlockRequest { peer_id, .. } => + peer_id == &peer_id1 || peer_id == &peer_id2, + _ => false, })); // peer 3 should be unaffected it was downloading finality data @@ -166,7 +169,7 @@ fn restart_doesnt_affect_peers_downloading_finality_data() { // Set common block to something that we don't have (e.g. failed import) sync.peers.get_mut(&peer_id3).unwrap().common_number = 100; - let _ = sync.restart().count(); + sync.restart(); assert_eq!(sync.peers.get(&peer_id3).unwrap().common_number, 50); } @@ -280,9 +283,8 @@ fn do_ancestor_search_when_common_block_to_best_qeued_gap_is_to_big() { let best_block = blocks.last().unwrap().clone(); let max_blocks_to_request = sync.max_blocks_per_request; // Connect the node we will sync from - sync.new_peer(peer_id1, best_block.hash(), *best_block.header().number()) - .unwrap(); - sync.new_peer(peer_id2, info.best_hash, 0).unwrap(); + sync.new_peer(peer_id1, best_block.hash(), *best_block.header().number()); + sync.new_peer(peer_id2, info.best_hash, 0); let mut best_block_num = 0; while best_block_num < MAX_DOWNLOAD_AHEAD { @@ -300,11 +302,17 @@ fn do_ancestor_search_when_common_block_to_best_qeued_gap_is_to_big() { let response = create_block_response(resp_blocks.clone()); - let res = sync.on_block_data(&peer_id1, Some(request), response).unwrap(); + // Clear old actions to not deal with them + let _ = sync.take_actions(); + + sync.on_block_data(&peer_id1, Some(request), response).unwrap(); + + let actions = sync.take_actions().collect::>(); + assert_eq!(actions.len(), 1); assert!(matches!( - res, - OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.len() == max_blocks_to_request as usize - ),); + &actions[0], + ChainSyncAction::ImportBlocks{ origin: _, blocks } if blocks.len() == max_blocks_to_request as usize, + )); best_block_num += max_blocks_to_request as u32; @@ -356,11 +364,14 @@ fn do_ancestor_search_when_common_block_to_best_qeued_gap_is_to_big() { assert_eq!(FromBlock::Number(best_block_num as u64), peer2_req.from); let response = create_block_response(vec![blocks[(best_block_num - 1) as usize].clone()]); - let res = sync.on_block_data(&peer_id2, Some(peer2_req), response).unwrap(); - assert!(matches!( - res, - OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.is_empty() - ),); + + // Clear old actions to not deal with them + let _ = sync.take_actions(); + + sync.on_block_data(&peer_id2, Some(peer2_req), response).unwrap(); + + let actions = sync.take_actions().collect::>(); + assert!(actions.is_empty()); let peer1_from = unwrap_from_block_number(peer1_req.unwrap().from); @@ -421,25 +432,34 @@ fn can_sync_huge_fork() { let common_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize / 2].clone(); // Connect the node we will sync from - sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()) - .unwrap(); + sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()); send_block_announce(fork_blocks.last().unwrap().header().clone(), peer_id1, &mut sync); let mut request = get_block_request(&mut sync, FromBlock::Number(info.best_number), 1, &peer_id1); + // Discard old actions we are not interested in + let _ = sync.take_actions(); + // Do the ancestor search loop { let block = &fork_blocks[unwrap_from_block_number(request.from.clone()) as usize - 1]; let response = create_block_response(vec![block.clone()]); - let on_block_data = 
sync.on_block_data(&peer_id1, Some(request), response).unwrap(); - request = if let OnBlockData::Request(_peer, request) = on_block_data { - request - } else { + sync.on_block_data(&peer_id1, Some(request), response).unwrap(); + + let actions = sync.take_actions().collect::>(); + + request = if actions.is_empty() { // We found the ancenstor break + } else { + assert_eq!(actions.len(), 1); + match &actions[0] { + ChainSyncAction::SendBlockRequest { peer_id: _, request } => request.clone(), + action @ _ => panic!("Unexpected action: {action:?}"), + } }; log::trace!(target: LOG_TARGET, "Request: {request:?}"); @@ -463,15 +483,18 @@ fn can_sync_huge_fork() { let response = create_block_response(resp_blocks.clone()); - let res = sync.on_block_data(&peer_id1, Some(request), response).unwrap(); + sync.on_block_data(&peer_id1, Some(request), response).unwrap(); + + let actions = sync.take_actions().collect::>(); + assert_eq!(actions.len(), 1); assert!(matches!( - res, - OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.len() == sync.max_blocks_per_request as usize - ),); + &actions[0], + ChainSyncAction::ImportBlocks{ origin: _, blocks } if blocks.len() == sync.max_blocks_per_request as usize + )); best_block_num += sync.max_blocks_per_request as u32; - let _ = sync.on_blocks_processed( + sync.on_blocks_processed( max_blocks_to_request as usize, max_blocks_to_request as usize, resp_blocks @@ -490,6 +513,9 @@ fn can_sync_huge_fork() { .collect(), ); + // Discard pending actions + let _ = sync.take_actions(); + resp_blocks .into_iter() .rev() @@ -539,25 +565,34 @@ fn syncs_fork_without_duplicate_requests() { let common_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize / 2].clone(); // Connect the node we will sync from - sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()) - .unwrap(); + sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()); send_block_announce(fork_blocks.last().unwrap().header().clone(), peer_id1, &mut sync); let mut request = get_block_request(&mut sync, FromBlock::Number(info.best_number), 1, &peer_id1); + // Discard pending actions + let _ = sync.take_actions(); + // Do the ancestor search loop { let block = &fork_blocks[unwrap_from_block_number(request.from.clone()) as usize - 1]; let response = create_block_response(vec![block.clone()]); - let on_block_data = sync.on_block_data(&peer_id1, Some(request), response).unwrap(); - request = if let OnBlockData::Request(_peer, request) = on_block_data { - request - } else { + sync.on_block_data(&peer_id1, Some(request), response).unwrap(); + + let actions = sync.take_actions().collect::>(); + + request = if actions.is_empty() { // We found the ancenstor break + } else { + assert_eq!(actions.len(), 1); + match &actions[0] { + ChainSyncAction::SendBlockRequest { peer_id: _, request } => request.clone(), + action @ _ => panic!("Unexpected action: {action:?}"), + } }; log::trace!(target: LOG_TARGET, "Request: {request:?}"); @@ -582,11 +617,17 @@ fn syncs_fork_without_duplicate_requests() { let response = create_block_response(resp_blocks.clone()); - let res = sync.on_block_data(&peer_id1, Some(request.clone()), response).unwrap(); + // Discard old actions + let _ = sync.take_actions(); + + sync.on_block_data(&peer_id1, Some(request.clone()), response).unwrap(); + + let actions = sync.take_actions().collect::>(); + assert_eq!(actions.len(), 1); assert!(matches!( - res, - OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.len() == 
max_blocks_to_request as usize - ),); + &actions[0], + ChainSyncAction::ImportBlocks{ origin: _, blocks } if blocks.len() == max_blocks_to_request as usize + )); best_block_num += max_blocks_to_request as u32; @@ -653,8 +694,7 @@ fn removes_target_fork_on_disconnect() { let peer_id1 = PeerId::random(); let common_block = blocks[1].clone(); // Connect the node we will sync from - sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()) - .unwrap(); + sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()); // Create a "new" header and announce it let mut header = blocks[0].header().clone(); @@ -678,8 +718,7 @@ fn can_import_response_with_missing_blocks() { let peer_id1 = PeerId::random(); let best_block = blocks[3].clone(); - sync.new_peer(peer_id1, best_block.hash(), *best_block.header().number()) - .unwrap(); + sync.new_peer(peer_id1, best_block.hash(), *best_block.header().number()); sync.peers.get_mut(&peer_id1).unwrap().state = PeerSyncState::Available; sync.peers.get_mut(&peer_id1).unwrap().common_number = 0; @@ -730,7 +769,7 @@ fn sync_restart_removes_block_but_not_justification_requests() { let (b1_hash, b1_number) = new_blocks(50); // add new peer and request blocks from them - sync.new_peer(peers[0], Hash::random(), 42).unwrap(); + sync.new_peer(peers[0], Hash::random(), 42); // we don't actually perform any requests, just keep track of peers waiting for a response let mut pending_responses = HashSet::new(); @@ -743,7 +782,7 @@ fn sync_restart_removes_block_but_not_justification_requests() { } // add a new peer at a known block - sync.new_peer(peers[1], b1_hash, b1_number).unwrap(); + sync.new_peer(peers[1], b1_hash, b1_number); // we request a justification for a block we have locally sync.request_justification(&b1_hash, b1_number); @@ -766,24 +805,29 @@ fn sync_restart_removes_block_but_not_justification_requests() { ); assert_eq!(pending_responses.len(), 2); + // discard old actions + let _ = sync.take_actions(); + // restart sync - let request_events = sync.restart().collect::>(); - for event in request_events.iter() { - match event.as_ref().unwrap() { - BlockRequestAction::RemoveStale { peer_id } => { + sync.restart(); + let actions = sync.take_actions().collect::>(); + for action in actions.iter() { + match action { + ChainSyncAction::CancelBlockRequest { peer_id } => { pending_responses.remove(&peer_id); }, - BlockRequestAction::SendRequest { peer_id, .. } => { + ChainSyncAction::SendBlockRequest { peer_id, .. } => { // we drop obsolete response, but don't register a new request, it's checked in // the `assert!` below pending_responses.remove(&peer_id); }, + action @ _ => panic!("Unexpected action: {action:?}"), } } - assert!(request_events.iter().any(|event| { - match event.as_ref().unwrap() { - BlockRequestAction::RemoveStale { .. } => false, - BlockRequestAction::SendRequest { peer_id, .. } => peer_id == &peers[0], + assert!(actions.iter().any(|action| { + match action { + ChainSyncAction::SendBlockRequest { peer_id, .. } => peer_id == &peers[0], + _ => false, } })); @@ -848,11 +892,9 @@ fn request_across_forks() { // Add the peers, all at the common ancestor 100. 
let common_block = blocks.last().unwrap(); let peer_id1 = PeerId::random(); - sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()) - .unwrap(); + sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()); let peer_id2 = PeerId::random(); - sync.new_peer(peer_id2, common_block.hash(), *common_block.header().number()) - .unwrap(); + sync.new_peer(peer_id2, common_block.hash(), *common_block.header().number()); // Peer 1 announces 107 from fork 1, 100-107 get downloaded. { @@ -864,11 +906,17 @@ fn request_across_forks() { let mut resp_blocks = fork_a_blocks[100_usize..107_usize].to_vec(); resp_blocks.reverse(); let response = create_block_response(resp_blocks.clone()); - let res = sync.on_block_data(&peer, Some(request), response).unwrap(); + + // Drop old actions + let _ = sync.take_actions(); + + sync.on_block_data(&peer, Some(request), response).unwrap(); + let actions = sync.take_actions().collect::>(); + assert_eq!(actions.len(), 1); assert!(matches!( - res, - OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.len() == 7_usize - ),); + &actions[0], + ChainSyncAction::ImportBlocks{ origin: _, blocks } if blocks.len() == 7_usize + )); assert_eq!(sync.best_queued_number, 107); assert_eq!(sync.best_queued_hash, block.hash()); assert!(sync.is_known(&block.header.parent_hash())); @@ -903,11 +951,17 @@ fn request_across_forks() { // block is announced. let request = get_block_request(&mut sync, FromBlock::Hash(block.hash()), 1, &peer); let response = create_block_response(vec![block.clone()]); - let res = sync.on_block_data(&peer, Some(request), response).unwrap(); + + // Drop old actions we are not going to check + let _ = sync.take_actions(); + + sync.on_block_data(&peer, Some(request), response).unwrap(); + let actions = sync.take_actions().collect::>(); + assert_eq!(actions.len(), 1); assert!(matches!( - res, - OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.len() == 1_usize - ),); + &actions[0], + ChainSyncAction::ImportBlocks{ origin: _, blocks } if blocks.len() == 1_usize + )); assert!(sync.is_known(&block.header.parent_hash())); } } diff --git a/substrate/client/network/sync/src/engine.rs b/substrate/client/network/sync/src/engine.rs index 560887132e3..58a9fdc49f2 100644 --- a/substrate/client/network/sync/src/engine.rs +++ b/substrate/client/network/sync/src/engine.rs @@ -25,10 +25,7 @@ use crate::{ }, block_relay_protocol::{BlockDownloader, BlockResponseError}, block_request_handler::MAX_BLOCKS_IN_RESPONSE, - chain_sync::{ - BlockRequestAction, ChainSync, ImportBlocksAction, ImportJustificationsAction, - OnBlockResponse, OnStateResponse, - }, + chain_sync::{ChainSync, ChainSyncAction}, pending_responses::{PendingResponses, ResponseEvent}, schema::v1::{StateRequest, StateResponse}, service::{ @@ -58,7 +55,7 @@ use schnellru::{ByLength, LruMap}; use tokio::time::{Interval, MissedTickBehavior}; use sc_client_api::{BlockBackend, HeaderBackend, ProofProvider}; -use sc_consensus::import_queue::ImportQueueService; +use sc_consensus::{import_queue::ImportQueueService, IncomingBlock}; use sc_network::{ config::{ FullNetworkConfiguration, NonDefaultSetConfig, NonReservedPeerMode, NotificationHandshake, @@ -74,8 +71,11 @@ use sc_network_common::{ }; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_blockchain::{Error as ClientError, HeaderMetadata}; -use sp_consensus::block_validation::BlockAnnounceValidator; -use sp_runtime::traits::{Block as BlockT, Header, 
NumberFor, Zero}; +use sp_consensus::{block_validation::BlockAnnounceValidator, BlockOrigin}; +use sp_runtime::{ + traits::{Block as BlockT, Header, NumberFor, Zero}, + Justifications, +}; use std::{ collections::{HashMap, HashSet}, @@ -713,11 +713,67 @@ where self.is_major_syncing .store(self.chain_sync.status().state.is_major_syncing(), Ordering::Relaxed); + // Process actions requested by `ChainSync` during `select!`. + self.process_chain_sync_actions(); + // Send outbound requests on `ChanSync`'s behalf. self.send_chain_sync_requests(); } } + fn process_chain_sync_actions(&mut self) { + self.chain_sync.take_actions().for_each(|action| match action { + ChainSyncAction::SendBlockRequest { peer_id, request } => { + // Sending block request implies dropping obsolete pending response as we are not + // interested in it anymore (see [`ChainSyncAction::SendBlockRequest`]). + // Furthermore, only one request at a time is allowed to any peer. + let removed = self.pending_responses.remove(&peer_id); + self.send_block_request(peer_id, request.clone()); + + trace!( + target: LOG_TARGET, + "Processed `ChainSyncAction::SendBlockRequest` to {} with {:?}, stale response removed: {}.", + peer_id, + request, + removed, + ) + }, + ChainSyncAction::CancelBlockRequest { peer_id } => { + let removed = self.pending_responses.remove(&peer_id); + + trace!(target: LOG_TARGET, "Processed {action:?}., response removed: {removed}."); + }, + ChainSyncAction::DropPeer(BadPeer(peer_id, rep)) => { + self.pending_responses.remove(&peer_id); + self.network_service + .disconnect_peer(peer_id, self.block_announce_protocol_name.clone()); + self.network_service.report_peer(peer_id, rep); + + trace!(target: LOG_TARGET, "Processed {action:?}."); + }, + ChainSyncAction::ImportBlocks { origin, blocks } => { + let count = blocks.len(); + self.import_blocks(origin, blocks); + + trace!( + target: LOG_TARGET, + "Processed `ChainSyncAction::ImportBlocks` with {count} blocks.", + ); + }, + ChainSyncAction::ImportJustifications { peer_id, hash, number, justifications } => { + self.import_justifications(peer_id, hash, number, justifications); + + trace!( + target: LOG_TARGET, + "Processed `ChainSyncAction::ImportJustifications` from peer {} for block {} ({}).", + peer_id, + hash, + number, + ) + }, + }); + } + fn perform_periodic_actions(&mut self) { self.report_metrics(); @@ -766,28 +822,7 @@ where ToServiceCommand::ClearJustificationRequests => self.chain_sync.clear_justification_requests(), ToServiceCommand::BlocksProcessed(imported, count, results) => { - for result in self.chain_sync.on_blocks_processed(imported, count, results) { - match result { - Ok(action) => match action { - BlockRequestAction::SendRequest { peer_id, request } => { - // drop obsolete pending response first - self.pending_responses.remove(&peer_id); - self.send_block_request(peer_id, request); - }, - BlockRequestAction::RemoveStale { peer_id } => { - self.pending_responses.remove(&peer_id); - }, - }, - Err(BadPeer(peer_id, repu)) => { - self.pending_responses.remove(&peer_id); - self.network_service.disconnect_peer( - peer_id, - self.block_announce_protocol_name.clone(), - ); - self.network_service.report_peer(peer_id, repu) - }, - } - } + self.chain_sync.on_blocks_processed(imported, count, results); }, ToServiceCommand::JustificationImported(peer_id, hash, number, success) => { self.chain_sync.on_justification_import(hash, number, success); @@ -940,9 +975,7 @@ where } } - if let Some(import_blocks_action) = self.chain_sync.peer_disconnected(&peer_id) { - 
self.import_blocks(import_blocks_action) - } + self.chain_sync.peer_disconnected(&peer_id); self.pending_responses.remove(&peer_id); self.event_streams.retain(|stream| { @@ -1053,17 +1086,7 @@ where inbound, }; - let req = if peer.info.roles.is_full() { - match self.chain_sync.new_peer(peer_id, peer.info.best_hash, peer.info.best_number) { - Ok(req) => req, - Err(BadPeer(id, repu)) => { - self.network_service.report_peer(id, repu); - return Err(()) - }, - } - } else { - None - }; + self.chain_sync.new_peer(peer_id, peer.info.best_hash, peer.info.best_number); log::debug!(target: LOG_TARGET, "Connected {peer_id}"); @@ -1075,10 +1098,6 @@ where self.num_in_peers += 1; } - if let Some(req) = req { - self.send_block_request(peer_id, req); - } - self.event_streams .retain(|stream| stream.unbounded_send(SyncEvent::PeerConnected(peer_id)).is_ok()); @@ -1202,22 +1221,7 @@ where PeerRequest::Block(req) => { match self.block_downloader.block_response_into_blocks(&req, resp) { Ok(blocks) => { - match self.chain_sync.on_block_response(peer_id, req, blocks) { - OnBlockResponse::SendBlockRequest { peer_id, request } => - self.send_block_request(peer_id, request), - OnBlockResponse::ImportBlocks(import_blocks_action) => - self.import_blocks(import_blocks_action), - OnBlockResponse::ImportJustifications(action) => - self.import_justifications(action), - OnBlockResponse::Nothing => {}, - OnBlockResponse::DisconnectPeer(BadPeer(peer_id, rep)) => { - self.network_service.disconnect_peer( - peer_id, - self.block_announce_protocol_name.clone(), - ); - self.network_service.report_peer(peer_id, rep); - }, - } + self.chain_sync.on_block_response(peer_id, req, blocks); }, Err(BlockResponseError::DecodeFailed(e)) => { debug!( @@ -1262,27 +1266,10 @@ where }, }; - match self.chain_sync.on_state_response(peer_id, response) { - OnStateResponse::ImportBlocks(import_blocks_action) => - self.import_blocks(import_blocks_action), - OnStateResponse::DisconnectPeer(BadPeer(peer_id, rep)) => { - self.network_service.disconnect_peer( - peer_id, - self.block_announce_protocol_name.clone(), - ); - self.network_service.report_peer(peer_id, rep); - }, - OnStateResponse::Nothing => {}, - } + self.chain_sync.on_state_response(peer_id, response); }, PeerRequest::WarpProof => { - if let Err(BadPeer(peer_id, rep)) = - self.chain_sync.on_warp_sync_response(&peer_id, EncodedProof(resp)) - { - self.network_service - .disconnect_peer(peer_id, self.block_announce_protocol_name.clone()); - self.network_service.report_peer(peer_id, rep); - } + self.chain_sync.on_warp_sync_response(&peer_id, EncodedProof(resp)); }, }, Ok(Err(e)) => { @@ -1388,7 +1375,7 @@ where } /// Import blocks. - fn import_blocks(&mut self, ImportBlocksAction { origin, blocks }: ImportBlocksAction) { + fn import_blocks(&mut self, origin: BlockOrigin, blocks: Vec>) { if let Some(metrics) = &self.metrics { metrics.import_queue_blocks_submitted.inc(); } @@ -1397,13 +1384,17 @@ where } /// Import justifications. 
- fn import_justifications(&mut self, action: ImportJustificationsAction) { + fn import_justifications( + &mut self, + peer_id: PeerId, + hash: B::Hash, + number: NumberFor, + justifications: Justifications, + ) { if let Some(metrics) = &self.metrics { metrics.import_queue_justifications_submitted.inc(); } - let ImportJustificationsAction { peer_id, hash, number, justifications } = action; - self.import_queue.import_justifications(peer_id, hash, number, justifications); } } -- GitLab From 5f4ce8026693b537beaee3aea5161ee34bac7ace Mon Sep 17 00:00:00 2001 From: Marcin S Date: Mon, 13 Nov 2023 11:21:16 +0100 Subject: [PATCH 06/74] PVF host: Make unavailable security features print a warning (#2244) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Bastian Köcher --- polkadot/node/core/pvf/src/host.rs | 19 +- polkadot/node/core/pvf/src/security.rs | 275 +++++++++++++++++-------- 2 files changed, 189 insertions(+), 105 deletions(-) diff --git a/polkadot/node/core/pvf/src/host.rs b/polkadot/node/core/pvf/src/host.rs index dd0bd858198..7b383e8034a 100644 --- a/polkadot/node/core/pvf/src/host.rs +++ b/polkadot/node/core/pvf/src/host.rs @@ -29,12 +29,11 @@ use crate::{ use always_assert::never; use futures::{ channel::{mpsc, oneshot}, - join, Future, FutureExt, SinkExt, StreamExt, + Future, FutureExt, SinkExt, StreamExt, }; use polkadot_node_core_pvf_common::{ error::{PrepareError, PrepareResult}, pvf::PvfPrepData, - SecurityStatus, }; use polkadot_parachain_primitives::primitives::ValidationResult; use std::{ @@ -208,21 +207,7 @@ pub async fn start(config: Config, metrics: Metrics) -> (ValidationHost, impl Fu gum::debug!(target: LOG_TARGET, ?config, "starting PVF validation host"); // Run checks for supported security features once per host startup. Warn here if not enabled. - let security_status = { - // TODO: add check that syslog is available and that seccomp violations are logged? - let (can_enable_landlock, can_enable_seccomp, can_unshare_user_namespace_and_change_root) = join!( - security::check_landlock(&config.prepare_worker_program_path), - security::check_seccomp(&config.prepare_worker_program_path), - security::check_can_unshare_user_namespace_and_change_root( - &config.prepare_worker_program_path - ) - ); - SecurityStatus { - can_enable_landlock, - can_enable_seccomp, - can_unshare_user_namespace_and_change_root, - } - }; + let security_status = security::check_security_status(&config).await; let (to_host_tx, to_host_rx) = mpsc::channel(10); diff --git a/polkadot/node/core/pvf/src/security.rs b/polkadot/node/core/pvf/src/security.rs index decd321e415..295dd7df94d 100644 --- a/polkadot/node/core/pvf/src/security.rs +++ b/polkadot/node/core/pvf/src/security.rs @@ -14,22 +14,142 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use crate::LOG_TARGET; -use std::path::Path; +use crate::{Config, SecurityStatus, LOG_TARGET}; +use futures::join; +use std::{fmt, path::Path}; use tokio::{ fs::{File, OpenOptions}, io::{AsyncReadExt, AsyncSeekExt, SeekFrom}, }; -/// Check if we can sandbox the root and emit a warning if not. +const SECURE_MODE_ANNOUNCEMENT: &'static str = + "In the next release this will be a hard error by default. + \nMore information: https://wiki.polkadot.network/docs/maintain-guides-secure-validator#secure-validator-mode"; + +/// Run checks for supported security features. 
+pub async fn check_security_status(config: &Config) -> SecurityStatus { + let Config { prepare_worker_program_path, .. } = config; + + // TODO: add check that syslog is available and that seccomp violations are logged? + let (landlock, seccomp, change_root) = join!( + check_landlock(prepare_worker_program_path), + check_seccomp(prepare_worker_program_path), + check_can_unshare_user_namespace_and_change_root(prepare_worker_program_path) + ); + + let security_status = SecurityStatus { + can_enable_landlock: landlock.is_ok(), + can_enable_seccomp: seccomp.is_ok(), + can_unshare_user_namespace_and_change_root: change_root.is_ok(), + }; + + let errs: Vec = [landlock, seccomp, change_root] + .into_iter() + .filter_map(|result| result.err()) + .collect(); + let err_occurred = print_secure_mode_message(errs); + if err_occurred { + gum::error!( + target: LOG_TARGET, + "{}", + SECURE_MODE_ANNOUNCEMENT, + ); + } + + security_status +} + +type SecureModeResult = std::result::Result<(), SecureModeError>; + +/// Errors related to enabling Secure Validator Mode. +#[derive(Debug)] +enum SecureModeError { + CannotEnableLandlock(String), + CannotEnableSeccomp(String), + CannotUnshareUserNamespaceAndChangeRoot(String), +} + +impl SecureModeError { + /// Whether this error is allowed with Secure Validator Mode enabled. + fn is_allowed_in_secure_mode(&self) -> bool { + use SecureModeError::*; + match self { + CannotEnableLandlock(_) => true, + CannotEnableSeccomp(_) => false, + CannotUnshareUserNamespaceAndChangeRoot(_) => false, + } + } +} + +impl fmt::Display for SecureModeError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + use SecureModeError::*; + match self { + CannotEnableLandlock(err) => write!(f, "Cannot enable landlock, a Linux 5.13+ kernel security feature: {err}"), + CannotEnableSeccomp(err) => write!(f, "Cannot enable seccomp, a Linux-specific kernel security feature: {err}"), + CannotUnshareUserNamespaceAndChangeRoot(err) => write!(f, "Cannot unshare user namespace and change root, which are Linux-specific kernel security features: {err}"), + } + } +} + +/// Errors if Secure Validator Mode and some mandatory errors occurred, warn otherwise. +/// +/// # Returns +/// +/// `true` if an error was printed, `false` otherwise. +fn print_secure_mode_message(errs: Vec) -> bool { + // Trying to run securely and some mandatory errors occurred. + const SECURE_MODE_ERROR: &'static str = "🚨 Your system cannot securely run a validator. \ + \nRunning validation of malicious PVF code has a higher risk of compromising this machine."; + // Some errors occurred when running insecurely, or some optional errors occurred when running + // securely. + const SECURE_MODE_WARNING: &'static str = "🚨 Some security issues have been detected. \ + \nRunning validation of malicious PVF code has a higher risk of compromising this machine."; + + if errs.is_empty() { + return false + } + + let errs_allowed = errs.iter().all(|err| err.is_allowed_in_secure_mode()); + let errs_string: String = errs + .iter() + .map(|err| { + format!( + "\n - {}{}", + if err.is_allowed_in_secure_mode() { "Optional: " } else { "" }, + err + ) + }) + .collect(); + + if errs_allowed { + gum::warn!( + target: LOG_TARGET, + "{}{}", + SECURE_MODE_WARNING, + errs_string, + ); + false + } else { + gum::error!( + target: LOG_TARGET, + "{}{}", + SECURE_MODE_ERROR, + errs_string, + ); + true + } +} + +/// Check if we can change root to a new, sandboxed root and return an error if not. 
/// /// We do this check by spawning a new process and trying to sandbox it. To get as close as possible /// to running the check in a worker, we try it... in a worker. The expected return status is 0 on /// success and -1 on failure. -pub async fn check_can_unshare_user_namespace_and_change_root( +async fn check_can_unshare_user_namespace_and_change_root( #[cfg_attr(not(target_os = "linux"), allow(unused_variables))] prepare_worker_program_path: &Path, -) -> bool { +) -> SecureModeResult { cfg_if::cfg_if! { if #[cfg(target_os = "linux")] { match tokio::process::Command::new(prepare_worker_program_path) @@ -37,50 +157,37 @@ pub async fn check_can_unshare_user_namespace_and_change_root( .output() .await { - Ok(output) if output.status.success() => true, + Ok(output) if output.status.success() => Ok(()), Ok(output) => { let stderr = std::str::from_utf8(&output.stderr) .expect("child process writes a UTF-8 string to stderr; qed") .trim(); - gum::warn!( - target: LOG_TARGET, - ?prepare_worker_program_path, - // Docs say to always print status using `Display` implementation. - status = %output.status, - %stderr, - "Cannot unshare user namespace and change root, which are Linux-specific kernel security features. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider running with support for unsharing user namespaces for maximum security." - ); - false - }, - Err(err) => { - gum::warn!( - target: LOG_TARGET, - ?prepare_worker_program_path, - "Could not start child process: {}", - err - ); - false + Err(SecureModeError::CannotUnshareUserNamespaceAndChangeRoot( + format!("not available: {}", stderr) + )) }, + Err(err) => + Err(SecureModeError::CannotUnshareUserNamespaceAndChangeRoot( + format!("could not start child process: {}", err) + )), } } else { - gum::warn!( - target: LOG_TARGET, - "Cannot unshare user namespace and change root, which are Linux-specific kernel security features. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider running on Linux with support for unsharing user namespaces for maximum security." - ); - false + Err(SecureModeError::CannotUnshareUserNamespaceAndChangeRoot( + "only available on Linux".into() + )) } } } -/// Check if landlock is supported and emit a warning if not. +/// Check if landlock is supported and return an error if not. /// /// We do this check by spawning a new process and trying to sandbox it. To get as close as possible /// to running the check in a worker, we try it... in a worker. The expected return status is 0 on /// success and -1 on failure. -pub async fn check_landlock( +async fn check_landlock( #[cfg_attr(not(target_os = "linux"), allow(unused_variables))] prepare_worker_program_path: &Path, -) -> bool { +) -> SecureModeResult { cfg_if::cfg_if! { if #[cfg(target_os = "linux")] { match tokio::process::Command::new(prepare_worker_program_path) @@ -88,81 +195,73 @@ pub async fn check_landlock( .status() .await { - Ok(status) if status.success() => true, - Ok(status) => { + Ok(status) if status.success() => Ok(()), + Ok(_status) => { let abi = polkadot_node_core_pvf_common::worker::security::landlock::LANDLOCK_ABI as u8; - gum::warn!( - target: LOG_TARGET, - ?prepare_worker_program_path, - ?status, - %abi, - "Cannot fully enable landlock, a Linux-specific kernel security feature. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider upgrading the kernel version for maximum security." 
- ); - false - }, - Err(err) => { - gum::warn!( - target: LOG_TARGET, - ?prepare_worker_program_path, - "Could not start child process: {}", - err - ); - false + Err(SecureModeError::CannotEnableLandlock( + format!("landlock ABI {} not available", abi) + )) }, + Err(err) => + Err(SecureModeError::CannotEnableLandlock( + format!("could not start child process: {}", err) + )), } } else { - gum::warn!( - target: LOG_TARGET, - "Cannot enable landlock, a Linux-specific kernel security feature. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider running on Linux with landlock support for maximum security." - ); - false + Err(SecureModeError::CannotEnableLandlock( + "only available on Linux".into() + )) } } } -/// Check if seccomp is supported and emit a warning if not. +/// Check if seccomp is supported and return an error if not. /// /// We do this check by spawning a new process and trying to sandbox it. To get as close as possible /// to running the check in a worker, we try it... in a worker. The expected return status is 0 on /// success and -1 on failure. -pub async fn check_seccomp( +async fn check_seccomp( #[cfg_attr(not(target_os = "linux"), allow(unused_variables))] prepare_worker_program_path: &Path, -) -> bool { +) -> SecureModeResult { cfg_if::cfg_if! { if #[cfg(target_os = "linux")] { - match tokio::process::Command::new(prepare_worker_program_path) - .arg("--check-can-enable-seccomp") - .status() - .await - { - Ok(status) if status.success() => true, - Ok(status) => { - gum::warn!( - target: LOG_TARGET, - ?prepare_worker_program_path, - ?status, - "Cannot fully enable seccomp, a Linux-specific kernel security feature. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider upgrading the kernel version for maximum security." - ); - false - }, - Err(err) => { - gum::warn!( - target: LOG_TARGET, - ?prepare_worker_program_path, - "Could not start child process: {}", - err - ); - false - }, + cfg_if::cfg_if! { + if #[cfg(target_arch = "x86_64")] { + match tokio::process::Command::new(prepare_worker_program_path) + .arg("--check-can-enable-seccomp") + .status() + .await + { + Ok(status) if status.success() => Ok(()), + Ok(_status) => + Err(SecureModeError::CannotEnableSeccomp( + "not available".into() + )), + Err(err) => + Err(SecureModeError::CannotEnableSeccomp( + format!("could not start child process: {}", err) + )), + } + } else { + Err(SecureModeError::CannotEnableSeccomp( + "only supported on CPUs from the x86_64 family (usually Intel or AMD)".into() + )) + } } } else { - gum::warn!( - target: LOG_TARGET, - "Cannot enable seccomp, a Linux-specific kernel security feature. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider running on Linux with seccomp support for maximum security." - ); - false + cfg_if::cfg_if! { + if #[cfg(target_arch = "x86_64")] { + Err(SecureModeError::CannotEnableSeccomp( + "only supported on Linux".into() + )) + } else { + Err(SecureModeError::CannotEnableSeccomp( + "only supported on Linux and on CPUs from the x86_64 family (usually Intel or AMD).".into() + )) + } + } } } } -- GitLab From 604704a84ce690eca93fd053abec1bb0a2b0ee32 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 13 Nov 2023 13:57:52 +0100 Subject: [PATCH 07/74] wasm-builder: Optimize `rerun-if-changed` logic (#2282) Optimizes the `rerun-if-changed` logic by ignoring `dev-dependencies` and also not outputting paths. 
Outputting paths could otherwise lead to including unwanted crates in the rerun checks. --- substrate/utils/wasm-builder/src/wasm_project.rs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/substrate/utils/wasm-builder/src/wasm_project.rs b/substrate/utils/wasm-builder/src/wasm_project.rs index c41e0935d75..2e6f671c45e 100644 --- a/substrate/utils/wasm-builder/src/wasm_project.rs +++ b/substrate/utils/wasm-builder/src/wasm_project.rs @@ -18,7 +18,7 @@ use crate::{write_file_if_changed, CargoCommandVersioned, OFFLINE}; use build_helper::rerun_if_changed; -use cargo_metadata::{CargoOpt, Metadata, MetadataCommand}; +use cargo_metadata::{DependencyKind, Metadata, MetadataCommand}; use parity_wasm::elements::{deserialize_buffer, Module}; use std::{ borrow::ToOwned, @@ -89,8 +89,7 @@ fn crate_metadata(cargo_manifest: &Path) -> Metadata { cargo_manifest.to_path_buf() }; - let mut crate_metadata_command = create_metadata_command(cargo_manifest); - crate_metadata_command.features(CargoOpt::AllFeatures); + let crate_metadata_command = create_metadata_command(cargo_manifest); let crate_metadata = crate_metadata_command .exec() @@ -915,6 +914,11 @@ fn generate_rerun_if_changed_instructions( packages.insert(DeduplicatePackage::from(package)); while let Some(dependency) = dependencies.pop() { + // Ignore all dev dependencies + if dependency.kind == DependencyKind::Development { + continue; + } + let path_or_git_dep = dependency.source.as_ref().map(|s| s.starts_with("git+")).unwrap_or(true); @@ -967,9 +971,7 @@ fn package_rerun_if_changed(package: &DeduplicatePackage) { p.path() == manifest_path || !p.path().is_dir() || !p.path().join("Cargo.toml").exists() }) .filter_map(|p| p.ok().map(|p| p.into_path())) - .filter(|p| { - p.is_dir() || p.extension().map(|e| e == "rs" || e == "toml").unwrap_or_default() - }) + .filter(|p| p.extension().map(|e| e == "rs" || e == "toml").unwrap_or_default()) .for_each(rerun_if_changed); } -- GitLab From ebcf0a0f1cab2d43718ba96d26e5687f4d14580a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 13 Nov 2023 14:32:02 +0100 Subject: [PATCH 08/74] pallet-grandpa: Remove `GRANDPA_AUTHORITIES_KEY` (#2181) Remove the `GRANDPA_AUTHORITIES_KEY` key and its usage. Apparently this was used in the early days to communicate the GRANDPA authorities to the node. However, we now have a runtime API that does this for us. So, this pull request moves from the custom managed storage item to a FRAME-managed storage item. This PR also includes a migration for doing the switch on a running chain.
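For reference, a downstream runtime picks up the switch simply by appending the new versioned migration to its migrations tuple, as the Rococo and Westend runtimes do further down in this patch. A minimal sketch, assuming the usual `Runtime` type and an `Unreleased` tuple that the runtime already passes to `frame_executive::Executive`:

```rust
// Sketch only: mirrors the rococo/westend changes in this patch. `Runtime` and the
// surrounding `migrations` module / `Unreleased` tuple are the runtime's own
// definitions, not something introduced by this PR.
pub mod migrations {
    use super::*;

    /// Unreleased migrations. Add new ones here:
    pub type Unreleased = (
        // ... previously scheduled migrations ...
        pallet_grandpa::migrations::MigrateV4ToV5<Runtime>,
    );
}
```

Because `MigrateV4ToV5` is a `VersionedMigration<4, 5, ..>`, it only runs while the pallet's on-chain storage version is still 4 and bumps it to 5 once the old key has been migrated.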
--------- Co-authored-by: Davide Galassi --- polkadot/runtime/rococo/src/lib.rs | 2 + polkadot/runtime/westend/src/lib.rs | 1 + substrate/client/consensus/grandpa/src/lib.rs | 3 - substrate/frame/grandpa/src/lib.rs | 37 +++---- substrate/frame/grandpa/src/migrations.rs | 3 + substrate/frame/grandpa/src/migrations/v5.rs | 96 +++++++++++++++++++ .../primitives/consensus/grandpa/src/lib.rs | 65 +------------ 7 files changed, 124 insertions(+), 83 deletions(-) create mode 100644 substrate/frame/grandpa/src/migrations/v5.rs diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 697d22c311a..57767b70d23 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -1497,6 +1497,8 @@ pub mod migrations { frame_support::migrations::RemovePallet::DbWeight>, frame_support::migrations::RemovePallet::DbWeight>, frame_support::migrations::RemovePallet::DbWeight>, + + pallet_grandpa::migrations::MigrateV4ToV5, ); } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index fe9ed22f437..1c97e54da48 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1558,6 +1558,7 @@ pub mod migrations { pallet_nomination_pools::migration::versioned_migrations::V5toV6, pallet_referenda::migration::v1::MigrateV0ToV1, pallet_nomination_pools::migration::versioned_migrations::V6ToV7, + pallet_grandpa::migrations::MigrateV4ToV5, ); } diff --git a/substrate/client/consensus/grandpa/src/lib.rs b/substrate/client/consensus/grandpa/src/lib.rs index da621abd254..a4584e6fc80 100644 --- a/substrate/client/consensus/grandpa/src/lib.rs +++ b/substrate/client/consensus/grandpa/src/lib.rs @@ -471,9 +471,6 @@ where Client: ExecutorProvider + HeaderBackend, { fn get(&self) -> Result { - // This implementation uses the Grandpa runtime API instead of reading directly from the - // `GRANDPA_AUTHORITIES_KEY` as the data may have been migrated since the genesis block of - // the chain, whereas the runtime API is backwards compatible. self.executor() .call( self.expect_block_hash_from_id(&BlockId::Number(Zero::zero()))?, diff --git a/substrate/frame/grandpa/src/lib.rs b/substrate/frame/grandpa/src/lib.rs index 95d1c8aa609..0b9f2b35827 100644 --- a/substrate/frame/grandpa/src/lib.rs +++ b/substrate/frame/grandpa/src/lib.rs @@ -30,14 +30,13 @@ // Re-export since this is necessary for `impl_apis` in runtime. pub use sp_consensus_grandpa::{ - self as fg_primitives, AuthorityId, AuthorityList, AuthorityWeight, VersionedAuthorityList, + self as fg_primitives, AuthorityId, AuthorityList, AuthorityWeight, }; -use codec::{self as codec, Decode, Encode, MaxEncodedLen}; +use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ dispatch::{DispatchResultWithPostInfo, Pays}, pallet_prelude::Get, - storage, traits::OneSessionHandler, weights::Weight, WeakBoundedVec, @@ -45,8 +44,8 @@ use frame_support::{ use frame_system::pallet_prelude::BlockNumberFor; use scale_info::TypeInfo; use sp_consensus_grandpa::{ - ConsensusLog, EquivocationProof, ScheduledChange, SetId, GRANDPA_AUTHORITIES_KEY, - GRANDPA_ENGINE_ID, RUNTIME_LOG_TARGET as LOG_TARGET, + ConsensusLog, EquivocationProof, ScheduledChange, SetId, GRANDPA_ENGINE_ID, + RUNTIME_LOG_TARGET as LOG_TARGET, }; use sp_runtime::{generic::DigestItem, traits::Zero, DispatchResult}; use sp_session::{GetSessionNumber, GetValidatorCount}; @@ -75,7 +74,7 @@ pub mod pallet { use frame_system::pallet_prelude::*; /// The current storage version. 
- const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); + const STORAGE_VERSION: StorageVersion = StorageVersion::new(5); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] @@ -145,7 +144,7 @@ pub mod pallet { // enact the change if we've reached the enacting block if block_number == pending_change.scheduled_at + pending_change.delay { - Self::set_grandpa_authorities(&pending_change.next_authorities); + Authorities::::put(&pending_change.next_authorities); Self::deposit_event(Event::NewAuthorities { authority_set: pending_change.next_authorities.into_inner(), }); @@ -342,6 +341,11 @@ pub mod pallet { #[pallet::getter(fn session_for_set)] pub(super) type SetIdSession = StorageMap<_, Twox64Concat, SetId, SessionIndex>; + /// The current list of authorities. + #[pallet::storage] + pub(crate) type Authorities = + StorageValue<_, BoundedAuthorityList, ValueQuery>; + #[derive(frame_support::DefaultNoBound)] #[pallet::genesis_config] pub struct GenesisConfig { @@ -354,7 +358,7 @@ pub mod pallet { impl BuildGenesisConfig for GenesisConfig { fn build(&self) { CurrentSetId::::put(SetId::default()); - Pallet::::initialize(&self.authorities) + Pallet::::initialize(self.authorities.clone()) } } @@ -428,12 +432,7 @@ pub enum StoredState { impl Pallet { /// Get the current set of authorities, along with their respective weights. pub fn grandpa_authorities() -> AuthorityList { - storage::unhashed::get_or_default::(GRANDPA_AUTHORITIES_KEY).into() - } - - /// Set the current set of authorities, along with their respective weights. - fn set_grandpa_authorities(authorities: &AuthorityList) { - storage::unhashed::put(GRANDPA_AUTHORITIES_KEY, &VersionedAuthorityList::from(authorities)); + Authorities::::get().into_inner() } /// Schedule GRANDPA to pause starting in the given number of blocks. @@ -522,10 +521,14 @@ impl Pallet { // Perform module initialization, abstracted so that it can be called either through genesis // config builder or through `on_genesis_session`. - fn initialize(authorities: &AuthorityList) { + fn initialize(authorities: AuthorityList) { if !authorities.is_empty() { assert!(Self::grandpa_authorities().is_empty(), "Authorities are already initialized!"); - Self::set_grandpa_authorities(authorities); + Authorities::::put( + &BoundedAuthorityList::::try_from(authorities).expect( + "Grandpa: `Config::MaxAuthorities` is smaller than the number of genesis authorities!", + ), + ); } // NOTE: initialize first session of first set. this is necessary for @@ -568,7 +571,7 @@ where I: Iterator, { let authorities = validators.map(|(_, k)| (k, 1)).collect::>(); - Self::initialize(&authorities); + Self::initialize(authorities); } fn on_new_session<'a, I: 'a>(changed: bool, validators: I, _queued_validators: I) diff --git a/substrate/frame/grandpa/src/migrations.rs b/substrate/frame/grandpa/src/migrations.rs index 6307cbdd3b0..3a484eb60d2 100644 --- a/substrate/frame/grandpa/src/migrations.rs +++ b/substrate/frame/grandpa/src/migrations.rs @@ -22,8 +22,11 @@ use frame_support::{ use crate::{Config, CurrentSetId, SetIdSession, LOG_TARGET}; +pub use v5::MigrateV4ToV5; + /// Version 4. 
pub mod v4; +mod v5; /// This migration will clean up all stale set id -> session entries from the /// `SetIdSession` storage map, only the latest `max_set_id_session_entries` diff --git a/substrate/frame/grandpa/src/migrations/v5.rs b/substrate/frame/grandpa/src/migrations/v5.rs new file mode 100644 index 00000000000..24cfc34104b --- /dev/null +++ b/substrate/frame/grandpa/src/migrations/v5.rs @@ -0,0 +1,96 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{BoundedAuthorityList, Pallet}; +use codec::Decode; +use frame_support::{ + migrations::VersionedMigration, + storage, + traits::{Get, OnRuntimeUpgrade}, + weights::Weight, +}; +use sp_consensus_grandpa::AuthorityList; +use sp_std::{marker::PhantomData, vec::Vec}; + +const GRANDPA_AUTHORITIES_KEY: &[u8] = b":grandpa_authorities"; + +fn load_authority_list() -> AuthorityList { + storage::unhashed::get_raw(GRANDPA_AUTHORITIES_KEY).map_or_else( + || Vec::new(), + |l| <(u8, AuthorityList)>::decode(&mut &l[..]).unwrap_or_default().1, + ) +} + +/// Actual implementation of [`MigrateV4ToV5`]. +pub struct MigrateImpl(PhantomData); + +impl OnRuntimeUpgrade for MigrateImpl { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + use codec::Encode; + + let authority_list_len = load_authority_list().len() as u32; + + if authority_list_len > T::MaxAuthorities::get() { + return Err( + "Grandpa: `Config::MaxAuthorities` is smaller than the actual number of authorities.".into() + ) + } + + if authority_list_len == 0 { + return Err("Grandpa: Authority list is empty!".into()) + } + + Ok(authority_list_len.encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + let len = u32::decode(&mut &state[..]).unwrap(); + + frame_support::ensure!( + len == crate::Pallet::::grandpa_authorities().len() as u32, + "Grandpa: pre-migrated and post-migrated list should have the same length" + ); + + frame_support::ensure!( + load_authority_list().is_empty(), + "Old authority list shouldn't exist anymore" + ); + + Ok(()) + } + + fn on_runtime_upgrade() -> Weight { + crate::Authorities::::put( + &BoundedAuthorityList::::force_from( + load_authority_list(), + Some("Grandpa: `Config::MaxAuthorities` is smaller than the actual number of authorities.") + ) + ); + + storage::unhashed::kill(GRANDPA_AUTHORITIES_KEY); + + T::DbWeight::get().reads_writes(1, 2) + } +} + +/// Migrate the storage from V4 to V5. +/// +/// Switches from `GRANDPA_AUTHORITIES_KEY` to a normal FRAME storage item. 
+pub type MigrateV4ToV5 = + VersionedMigration<4, 5, MigrateImpl, Pallet, ::DbWeight>; diff --git a/substrate/primitives/consensus/grandpa/src/lib.rs b/substrate/primitives/consensus/grandpa/src/lib.rs index baeaee4738e..1cf5504c5e7 100644 --- a/substrate/primitives/consensus/grandpa/src/lib.rs +++ b/substrate/primitives/consensus/grandpa/src/lib.rs @@ -19,13 +19,10 @@ #![cfg_attr(not(feature = "std"), no_std)] -#[cfg(not(feature = "std"))] -extern crate alloc; - #[cfg(feature = "serde")] use serde::Serialize; -use codec::{Codec, Decode, Encode, Input}; +use codec::{Codec, Decode, Encode}; use scale_info::TypeInfo; #[cfg(feature = "std")] use sp_keystore::KeystorePtr; @@ -33,7 +30,7 @@ use sp_runtime::{ traits::{Header as HeaderT, NumberFor}, ConsensusEngineId, RuntimeDebug, }; -use sp_std::{borrow::Cow, vec::Vec}; +use sp_std::vec::Vec; /// The log target to be used by client code. pub const CLIENT_LOG_TARGET: &str = "grandpa"; @@ -62,10 +59,6 @@ pub type AuthoritySignature = app::Signature; /// The `ConsensusEngineId` of GRANDPA. pub const GRANDPA_ENGINE_ID: ConsensusEngineId = *b"FRNK"; -/// The storage key for the current set of weighted Grandpa authorities. -/// The value stored is an encoded VersionedAuthorityList. -pub const GRANDPA_AUTHORITIES_KEY: &[u8] = b":grandpa_authorities"; - /// The weight of an authority. pub type AuthorityWeight = u64; @@ -464,60 +457,6 @@ where Some(grandpa::SignedMessage { message, signature, id: public }) } -/// WASM function call to check for pending changes. -pub const PENDING_CHANGE_CALL: &str = "grandpa_pending_change"; -/// WASM function call to get current GRANDPA authorities. -pub const AUTHORITIES_CALL: &str = "grandpa_authorities"; - -/// The current version of the stored AuthorityList type. The encoding version MUST be updated any -/// time the AuthorityList type changes. -const AUTHORITIES_VERSION: u8 = 1; - -/// An AuthorityList that is encoded with a version specifier. The encoding version is updated any -/// time the AuthorityList type changes. This ensures that encodings of different versions of an -/// AuthorityList are differentiable. Attempting to decode an authority list with an unknown -/// version will fail. -#[derive(Default)] -pub struct VersionedAuthorityList<'a>(Cow<'a, AuthorityList>); - -impl<'a> From for VersionedAuthorityList<'a> { - fn from(authorities: AuthorityList) -> Self { - VersionedAuthorityList(Cow::Owned(authorities)) - } -} - -impl<'a> From<&'a AuthorityList> for VersionedAuthorityList<'a> { - fn from(authorities: &'a AuthorityList) -> Self { - VersionedAuthorityList(Cow::Borrowed(authorities)) - } -} - -impl<'a> Into for VersionedAuthorityList<'a> { - fn into(self) -> AuthorityList { - self.0.into_owned() - } -} - -impl<'a> Encode for VersionedAuthorityList<'a> { - fn size_hint(&self) -> usize { - (AUTHORITIES_VERSION, self.0.as_ref()).size_hint() - } - - fn using_encoded R>(&self, f: F) -> R { - (AUTHORITIES_VERSION, self.0.as_ref()).using_encoded(f) - } -} - -impl<'a> Decode for VersionedAuthorityList<'a> { - fn decode(value: &mut I) -> Result { - let (version, authorities): (u8, AuthorityList) = Decode::decode(value)?; - if version != AUTHORITIES_VERSION { - return Err("unknown Grandpa authorities version".into()) - } - Ok(authorities.into()) - } -} - /// An opaque type used to represent the key ownership proof at the runtime API /// boundary. The inner value is an encoded representation of the actual key /// ownership proof which will be parameterized when defining the runtime. 
At -- GitLab From 60c77a2e9a0e611a227fd2aa82bba1491ea8ba9c Mon Sep 17 00:00:00 2001 From: gupnik <17176722+gupnik@users.noreply.github.com> Date: Mon, 13 Nov 2023 19:14:41 +0530 Subject: [PATCH 09/74] Adds syntax for marking calls feeless (#1926) Fixes https://github.com/paritytech/polkadot-sdk/issues/1725 This PR adds the following changes: 1. An attribute `pallet::feeless_if` that can be optionally attached to a call like so: ```rust #[pallet::feeless_if(|_origin: &OriginFor, something: &u32| -> bool { *something == 0 })] pub fn do_something(origin: OriginFor, something: u32) -> DispatchResult { .... } ``` The closure passed accepts references to arguments as specified in the call fn. It returns a boolean that denotes the conditions required for this call to be "feeless". 2. A signed extension `SkipCheckIfFeeless` that wraps a transaction payment processor such as `pallet_transaction_payment::ChargeTransactionPayment`. It checks for all calls annotated with `pallet::feeless_if` to see if the conditions are met. If so, the wrapped signed extension is not called, essentially making the call feeless. In order to use this, you can simply replace your existing signed extension that manages transaction payment like so: ```diff - pallet_transaction_payment::ChargeTransactionPayment, + pallet_skip_feeless_payment::SkipCheckIfFeeless< + Runtime, + pallet_transaction_payment::ChargeTransactionPayment, + >, ``` ### Todo - [x] Tests - [x] Docs - [x] Prdoc --------- Co-authored-by: Nikhil Gupta <> Co-authored-by: Oliver Tale-Yazdi Co-authored-by: Francisco Aguirre Co-authored-by: Liam Aharon --- Cargo.lock | 15 ++ Cargo.toml | 1 + prdoc/pr_1926.prdoc | 30 ++++ substrate/bin/node/cli/Cargo.toml | 3 + substrate/bin/node/cli/src/service.rs | 38 +++-- substrate/bin/node/runtime/Cargo.toml | 4 + substrate/bin/node/runtime/src/lib.rs | 16 +- substrate/bin/node/testing/Cargo.toml | 1 + substrate/bin/node/testing/src/keyring.rs | 4 +- .../frame/examples/kitchensink/src/lib.rs | 4 + .../src/construct_runtime/expand/call.rs | 12 ++ substrate/frame/support/procedural/src/lib.rs | 30 ++++ .../procedural/src/pallet/expand/call.rs | 27 ++++ .../procedural/src/pallet/parse/call.rs | 147 +++++++++++++++--- substrate/frame/support/src/dispatch.rs | 14 ++ substrate/frame/support/src/lib.rs | 7 +- .../call_feeless_invalid_closure_arg1.rs | 37 +++++ .../call_feeless_invalid_closure_arg1.stderr | 5 + .../call_feeless_invalid_closure_arg2.rs | 37 +++++ .../call_feeless_invalid_closure_arg2.stderr | 5 + .../call_feeless_invalid_closure_arg3.rs | 37 +++++ .../call_feeless_invalid_closure_arg3.stderr | 5 + .../call_feeless_invalid_closure_return.rs | 37 +++++ ...call_feeless_invalid_closure_return.stderr | 5 + .../pallet_ui/call_feeless_invalid_type.rs | 37 +++++ .../call_feeless_invalid_type.stderr | 11 ++ .../tests/pallet_ui/call_invalid_attr.stderr | 2 +- .../pallet_ui/call_invalid_origin_type.stderr | 6 - .../test/tests/pallet_ui/pass/feeless_call.rs | 37 +++++ .../skip-feeless-payment/Cargo.toml | 44 ++++++ .../skip-feeless-payment/src/lib.rs | 145 +++++++++++++++++ .../skip-feeless-payment/src/mock.rs | 92 +++++++++++ .../skip-feeless-payment/src/tests.rs | 33 ++++ 33 files changed, 874 insertions(+), 54 deletions(-) create mode 100644 prdoc/pr_1926.prdoc create mode 100644 substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg1.rs create mode 100644 substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg1.stderr create mode 100644 
substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg2.rs create mode 100644 substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg2.stderr create mode 100644 substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg3.rs create mode 100644 substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg3.stderr create mode 100644 substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_return.rs create mode 100644 substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_return.stderr create mode 100644 substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_type.rs create mode 100644 substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_type.stderr create mode 100644 substrate/frame/support/test/tests/pallet_ui/pass/feeless_call.rs create mode 100644 substrate/frame/transaction-payment/skip-feeless-payment/Cargo.toml create mode 100644 substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs create mode 100644 substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs create mode 100644 substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs diff --git a/Cargo.lock b/Cargo.lock index 2a091ce6817..dbdc2f856c4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7224,6 +7224,7 @@ dependencies = [ "pallet-scheduler", "pallet-session", "pallet-session-benchmarking", + "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", @@ -8853,6 +8854,7 @@ dependencies = [ "pallet-asset-conversion-tx-payment", "pallet-asset-tx-payment", "pallet-assets", + "pallet-skip-feeless-payment", "parity-scale-codec", "sc-block-builder", "sc-client-api", @@ -10795,6 +10797,18 @@ dependencies = [ "sp-std 8.0.0", ] +[[package]] +name = "pallet-skip-feeless-payment" +version = "1.0.0-dev" +dependencies = [ + "frame-support", + "frame-system", + "parity-scale-codec", + "scale-info", + "sp-runtime", + "sp-std 8.0.0", +] + [[package]] name = "pallet-society" version = "4.0.0-dev" @@ -18203,6 +18217,7 @@ dependencies = [ "pallet-assets", "pallet-balances", "pallet-im-online", + "pallet-skip-feeless-payment", "pallet-timestamp", "parity-scale-codec", "platforms", diff --git a/Cargo.toml b/Cargo.toml index 42bbac37a6c..30445bd5945 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -382,6 +382,7 @@ members = [ "substrate/frame/transaction-payment/asset-tx-payment", "substrate/frame/transaction-payment/rpc", "substrate/frame/transaction-payment/rpc/runtime-api", + "substrate/frame/transaction-payment/skip-feeless-payment", "substrate/frame/transaction-storage", "substrate/frame/treasury", "substrate/frame/try-runtime", diff --git a/prdoc/pr_1926.prdoc b/prdoc/pr_1926.prdoc new file mode 100644 index 00000000000..9dc656f1260 --- /dev/null +++ b/prdoc/pr_1926.prdoc @@ -0,0 +1,30 @@ +title: Adds syntax for marking calls feeless + +doc: + - audience: Core Dev + description: | + 1. Adds an attribute `#[pallet::feeless_if]` that can be optionally attached to a `pallet::call`. + 2. Adds a signed extension SkipCheckIfFeeless that wraps a transaction + payment processor to potentially skip payment fees for such calls. + Note that both the attribute and the signed extension are needed to make the call feeless. 
+ +migrations: + db: [] + + runtime: [] + +crates: + - name: "frame-support-procedural" + semver: minor + - name: "pallet-skip-feeless-payment" + semver: major + - pallet-example-kitchensink + semver: patch + - kitchensink-runtime + semver: major + - node-testing + semver: patch + - node-cli + semver: patch + +host_functions: [] diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml index 5e7ffebaa8e..8f3c2185deb 100644 --- a/substrate/bin/node/cli/Cargo.toml +++ b/substrate/bin/node/cli/Cargo.toml @@ -96,6 +96,7 @@ pallet-assets = { path = "../../../frame/assets" } pallet-asset-conversion-tx-payment = { path = "../../../frame/transaction-payment/asset-conversion-tx-payment" } pallet-asset-tx-payment = { path = "../../../frame/transaction-payment/asset-tx-payment" } pallet-im-online = { path = "../../../frame/im-online", default-features = false} +pallet-skip-feeless-payment = { path = "../../../frame/transaction-payment/skip-feeless-payment", default-features = false} # node-specific dependencies kitchensink-runtime = { path = "../runtime" } @@ -168,6 +169,7 @@ runtime-benchmarks = [ "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-im-online/runtime-benchmarks", + "pallet-skip-feeless-payment/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "sc-client-db/runtime-benchmarks", "sc-service/runtime-benchmarks", @@ -183,6 +185,7 @@ try-runtime = [ "pallet-assets/try-runtime", "pallet-balances/try-runtime", "pallet-im-online/try-runtime", + "pallet-skip-feeless-payment/try-runtime", "pallet-timestamp/try-runtime", "sp-runtime/try-runtime", "substrate-cli-test-utils/try-runtime", diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs index 153dda5c0a5..1c71b5a3956 100644 --- a/substrate/bin/node/cli/src/service.rs +++ b/substrate/bin/node/cli/src/service.rs @@ -91,21 +91,24 @@ pub fn create_extrinsic( .map(|c| c / 2) .unwrap_or(2) as u64; let tip = 0; - let extra: kitchensink_runtime::SignedExtra = ( - frame_system::CheckNonZeroSender::::new(), - frame_system::CheckSpecVersion::::new(), - frame_system::CheckTxVersion::::new(), - frame_system::CheckGenesis::::new(), - frame_system::CheckEra::::from(generic::Era::mortal( - period, - best_block.saturated_into(), - )), - frame_system::CheckNonce::::from(nonce), - frame_system::CheckWeight::::new(), - pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::::from( - tip, None, - ), - ); + let extra: kitchensink_runtime::SignedExtra = + ( + frame_system::CheckNonZeroSender::::new(), + frame_system::CheckSpecVersion::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckGenesis::::new(), + frame_system::CheckEra::::from(generic::Era::mortal( + period, + best_block.saturated_into(), + )), + frame_system::CheckNonce::::from(nonce), + frame_system::CheckWeight::::new(), + pallet_skip_feeless_payment::SkipCheckIfFeeless::from( + pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::< + kitchensink_runtime::Runtime, + >::from(tip, None), + ), + ); let raw_payload = kitchensink_runtime::SignedPayload::from_raw( function.clone(), @@ -879,8 +882,9 @@ mod tests { let check_era = frame_system::CheckEra::from(Era::Immortal); let check_nonce = frame_system::CheckNonce::from(index); let check_weight = frame_system::CheckWeight::new(); - let tx_payment = - pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(0, None); + let tx_payment = pallet_skip_feeless_payment::SkipCheckIfFeeless::from( + 
pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(0, None), + ); let extra = ( check_non_zero_sender, check_spec_version, diff --git a/substrate/bin/node/runtime/Cargo.toml b/substrate/bin/node/runtime/Cargo.toml index 836f90e7654..2414358b60b 100644 --- a/substrate/bin/node/runtime/Cargo.toml +++ b/substrate/bin/node/runtime/Cargo.toml @@ -129,6 +129,7 @@ pallet-transaction-payment = { path = "../../../frame/transaction-payment", defa pallet-transaction-payment-rpc-runtime-api = { path = "../../../frame/transaction-payment/rpc/runtime-api", default-features = false} pallet-asset-conversion-tx-payment = { path = "../../../frame/transaction-payment/asset-conversion-tx-payment", default-features = false} pallet-asset-tx-payment = { path = "../../../frame/transaction-payment/asset-tx-payment", default-features = false} +pallet-skip-feeless-payment = { path = "../../../frame/transaction-payment/skip-feeless-payment", default-features = false} pallet-transaction-storage = { path = "../../../frame/transaction-storage", default-features = false} pallet-uniques = { path = "../../../frame/uniques", default-features = false} pallet-vesting = { path = "../../../frame/vesting", default-features = false} @@ -212,6 +213,7 @@ std = [ "pallet-scheduler/std", "pallet-session-benchmarking?/std", "pallet-session/std", + "pallet-skip-feeless-payment/std", "pallet-society/std", "pallet-staking-runtime-api/std", "pallet-staking/std", @@ -308,6 +310,7 @@ runtime-benchmarks = [ "pallet-salary/runtime-benchmarks", "pallet-scheduler/runtime-benchmarks", "pallet-session-benchmarking/runtime-benchmarks", + "pallet-skip-feeless-payment/runtime-benchmarks", "pallet-society/runtime-benchmarks", "pallet-staking/runtime-benchmarks", "pallet-state-trie-migration/runtime-benchmarks", @@ -381,6 +384,7 @@ try-runtime = [ "pallet-salary/try-runtime", "pallet-scheduler/try-runtime", "pallet-session/try-runtime", + "pallet-skip-feeless-payment/try-runtime", "pallet-society/try-runtime", "pallet-staking/try-runtime", "pallet-state-trie-migration/try-runtime", diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 620e89a65e5..e9adc48ff9c 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -569,6 +569,10 @@ impl pallet_asset_conversion_tx_payment::Config for Runtime { pallet_asset_conversion_tx_payment::AssetConversionAdapter; } +impl pallet_skip_feeless_payment::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} + parameter_types! 
{ pub const MinimumPeriod: Moment = SLOT_DURATION / 2; } @@ -1394,7 +1398,11 @@ where frame_system::CheckEra::::from(era), frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), - pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::::from(tip, None), + pallet_skip_feeless_payment::SkipCheckIfFeeless::from( + pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::::from( + tip, None, + ), + ), ); let raw_payload = SignedPayload::new(call, extra) .map_err(|e| { @@ -2134,6 +2142,7 @@ construct_runtime!( Statement: pallet_statement, Broker: pallet_broker, Mixnet: pallet_mixnet, + SkipFeelessPayment: pallet_skip_feeless_payment, } ); @@ -2160,7 +2169,10 @@ pub type SignedExtra = ( frame_system::CheckEra, frame_system::CheckNonce, frame_system::CheckWeight, - pallet_asset_conversion_tx_payment::ChargeAssetTxPayment, + pallet_skip_feeless_payment::SkipCheckIfFeeless< + Runtime, + pallet_asset_conversion_tx_payment::ChargeAssetTxPayment, + >, ); /// Unchecked extrinsic type as expected by this runtime. diff --git a/substrate/bin/node/testing/Cargo.toml b/substrate/bin/node/testing/Cargo.toml index 68f80ab6e83..e4fb06b5dcd 100644 --- a/substrate/bin/node/testing/Cargo.toml +++ b/substrate/bin/node/testing/Cargo.toml @@ -26,6 +26,7 @@ pallet-asset-conversion = { path = "../../../frame/asset-conversion" } pallet-assets = { path = "../../../frame/assets" } pallet-asset-conversion-tx-payment = { path = "../../../frame/transaction-payment/asset-conversion-tx-payment" } pallet-asset-tx-payment = { path = "../../../frame/transaction-payment/asset-tx-payment" } +pallet-skip-feeless-payment = { path = "../../../frame/transaction-payment/skip-feeless-payment" } sc-block-builder = { path = "../../../client/block-builder" } sc-client-api = { path = "../../../client/api" } sc-client-db = { path = "../../../client/db", features = ["rocksdb"]} diff --git a/substrate/bin/node/testing/src/keyring.rs b/substrate/bin/node/testing/src/keyring.rs index 22a8f5deb19..9940077c9da 100644 --- a/substrate/bin/node/testing/src/keyring.rs +++ b/substrate/bin/node/testing/src/keyring.rs @@ -78,7 +78,9 @@ pub fn signed_extra(nonce: Nonce, extra_fee: Balance) -> SignedExtra { frame_system::CheckEra::from(Era::mortal(256, 0)), frame_system::CheckNonce::from(nonce), frame_system::CheckWeight::new(), - pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(extra_fee, None), + pallet_skip_feeless_payment::SkipCheckIfFeeless::from( + pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(extra_fee, None), + ), ) } diff --git a/substrate/frame/examples/kitchensink/src/lib.rs b/substrate/frame/examples/kitchensink/src/lib.rs index 56117c59dc6..89759dd0bf6 100644 --- a/substrate/frame/examples/kitchensink/src/lib.rs +++ b/substrate/frame/examples/kitchensink/src/lib.rs @@ -206,6 +206,10 @@ pub mod pallet { impl Pallet { #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::set_foo_benchmark())] + /// Marks this call as feeless if `new_foo` is zero. 
+ #[pallet::feeless_if(|_origin: &OriginFor, new_foo: &u32, _other_compact: &u128| -> bool { + *new_foo == 0 + })] pub fn set_foo( _: OriginFor, new_foo: u32, diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs index 859b9a327e4..ce2aa094279 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs @@ -124,6 +124,18 @@ pub fn expand_outer_dispatch( } } + impl #scrate::dispatch::CheckIfFeeless for RuntimeCall { + type Origin = #system_path::pallet_prelude::OriginFor<#runtime>; + fn is_feeless(&self, origin: &Self::Origin) -> bool { + match self { + #( + #pallet_attrs + #variant_patterns => call.is_feeless(origin), + )* + } + } + } + impl #scrate::traits::GetCallMetadata for RuntimeCall { fn get_call_metadata(&self) -> #scrate::traits::CallMetadata { use #scrate::traits::GetCallName; diff --git a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs index 68bf3e4874b..ec411891885 100644 --- a/substrate/frame/support/procedural/src/lib.rs +++ b/substrate/frame/support/procedural/src/lib.rs @@ -1157,6 +1157,36 @@ pub fn call_index(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } +/// Each dispatchable may be annotated with the `#[pallet::feeless_if($closure)]` attribute, +/// which explicitly defines the condition for the dispatchable to be feeless. +/// +/// The arguments for the closure must be the referenced arguments of the dispatchable function. +/// +/// The closure must return `bool`. +/// +/// ### Example +/// ```ignore +/// #[pallet::feeless_if(|_origin: &OriginFor, something: &u32| -> bool { +/// *something == 0 +/// })] +/// pub fn do_something(origin: OriginFor, something: u32) -> DispatchResult { +/// .... +/// } +/// ``` +/// +/// Please note that this only works for signed dispatchables and requires a signed extension +/// such as `SkipCheckIfFeeless` as defined in `pallet-skip-feeless-payment` to wrap the existing +/// payment extension. Else, this is completely ignored and the dispatchable is still charged. +/// +/// ### Macro expansion +/// +/// The macro implements the `CheckIfFeeless` trait on the dispatchable and calls the corresponding +/// closure in the implementation. +#[proc_macro_attribute] +pub fn feeless_if(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + /// Allows you to define some extra constants to be added into constant metadata. 
/// /// Item must be defined as: diff --git a/substrate/frame/support/procedural/src/pallet/expand/call.rs b/substrate/frame/support/procedural/src/pallet/expand/call.rs index ed6335159cd..cf302faafc7 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/call.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/call.rs @@ -241,6 +241,16 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { }) .collect::>(); + let feeless_check = methods.iter().map(|method| &method.feeless_check).collect::>(); + let feeless_check_result = + feeless_check.iter().zip(args_name.iter()).map(|(feeless_check, arg_name)| { + if let Some(feeless_check) = feeless_check { + quote::quote!(#feeless_check(origin, #( #arg_name, )*)) + } else { + quote::quote!(false) + } + }); + quote::quote_spanned!(span => mod warnings { #( @@ -347,6 +357,23 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { } } + impl<#type_impl_gen> #frame_support::dispatch::CheckIfFeeless for #call_ident<#type_use_gen> + #where_clause + { + type Origin = #frame_system::pallet_prelude::OriginFor; + #[allow(unused_variables)] + fn is_feeless(&self, origin: &Self::Origin) -> bool { + match *self { + #( + Self::#fn_name { #( #args_name_pattern_ref, )* } => { + #feeless_check_result + }, + )* + Self::__Ignore(_, _) => unreachable!("__Ignore cannot be used"), + } + } + } + impl<#type_impl_gen> #frame_support::traits::GetCallName for #call_ident<#type_use_gen> #where_clause { diff --git a/substrate/frame/support/procedural/src/pallet/parse/call.rs b/substrate/frame/support/procedural/src/pallet/parse/call.rs index 90631f264b9..519e1e61895 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/call.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/call.rs @@ -17,9 +17,10 @@ use super::{helper, InheritedCallWeightAttr}; use frame_support_procedural_tools::get_doc_literals; +use proc_macro2::Span; use quote::ToTokens; use std::collections::HashMap; -use syn::spanned::Spanned; +use syn::{spanned::Spanned, ExprClosure}; /// List of additional token to be used for parsing. mod keyword { @@ -30,6 +31,7 @@ mod keyword { syn::custom_keyword!(compact); syn::custom_keyword!(T); syn::custom_keyword!(pallet); + syn::custom_keyword!(feeless_if); } /// Definition of dispatchables typically `impl Pallet { ... }` @@ -82,13 +84,18 @@ pub struct CallVariantDef { pub docs: Vec, /// Attributes annotated at the top of the dispatchable function. pub attrs: Vec, + /// The optional `feeless_if` attribute on the `pallet::call`. + pub feeless_check: Option, } /// Attributes for functions in call impl block. 
-/// Parse for `#[pallet::weight(expr)]` or `#[pallet::call_index(expr)] pub enum FunctionAttr { + /// Parse for `#[pallet::call_index(expr)]` CallIndex(u8), + /// Parse for `#[pallet::weight(expr)]` Weight(syn::Expr), + /// Parse for `#[pallet::feeless_if(expr)]` + FeelessIf(Span, syn::ExprClosure), } impl syn::parse::Parse for FunctionAttr { @@ -115,6 +122,19 @@ impl syn::parse::Parse for FunctionAttr { return Err(syn::Error::new(index.span(), msg)) } Ok(FunctionAttr::CallIndex(index.base10_parse()?)) + } else if lookahead.peek(keyword::feeless_if) { + content.parse::()?; + let closure_content; + syn::parenthesized!(closure_content in content); + Ok(FunctionAttr::FeelessIf( + closure_content.span(), + closure_content.parse::().map_err(|e| { + let msg = "Invalid feeless_if attribute: expected a closure"; + let mut err = syn::Error::new(closure_content.span(), msg); + err.combine(e); + err + })?, + )) } else { Err(lookahead.error()) } @@ -138,28 +158,33 @@ impl syn::parse::Parse for ArgAttrIsCompact { } } -/// Check the syntax is `OriginFor` -pub fn check_dispatchable_first_arg_type(ty: &syn::Type) -> syn::Result<()> { - pub struct CheckDispatchableFirstArg; +/// Check the syntax is `OriginFor` or `&OriginFor`. +pub fn check_dispatchable_first_arg_type(ty: &syn::Type, is_ref: bool) -> syn::Result<()> { + pub struct CheckDispatchableFirstArg(bool); impl syn::parse::Parse for CheckDispatchableFirstArg { fn parse(input: syn::parse::ParseStream) -> syn::Result { + let is_ref = input.parse::().is_ok(); input.parse::()?; input.parse::()?; input.parse::()?; input.parse::]>()?; - Ok(Self) + Ok(Self(is_ref)) } } - syn::parse2::(ty.to_token_stream()).map_err(|e| { - let msg = "Invalid type: expected `OriginFor`"; - let mut err = syn::Error::new(ty.span(), msg); - err.combine(e); - err - })?; - - Ok(()) + let result = syn::parse2::(ty.to_token_stream()); + return match result { + Ok(CheckDispatchableFirstArg(has_ref)) if is_ref == has_ref => Ok(()), + _ => { + let msg = if is_ref { + "Invalid type: expected `&OriginFor`" + } else { + "Invalid type: expected `OriginFor`" + }; + return Err(syn::Error::new(ty.span(), msg)) + }, + } } impl CallDef { @@ -215,7 +240,7 @@ impl CallDef { return Err(syn::Error::new(method.sig.span(), msg)) }, Some(syn::FnArg::Typed(arg)) => { - check_dispatchable_first_arg_type(&arg.ty)?; + check_dispatchable_first_arg_type(&arg.ty, false)?; }, } @@ -227,16 +252,22 @@ impl CallDef { return Err(syn::Error::new(method.sig.span(), msg)) } - let (mut weight_attrs, mut call_idx_attrs): (Vec, Vec) = - helper::take_item_pallet_attrs(&mut method.attrs)?.into_iter().partition( - |attr| { - if let FunctionAttr::Weight(_) = attr { - true - } else { - false - } + let mut call_idx_attrs = vec![]; + let mut weight_attrs = vec![]; + let mut feeless_attrs = vec![]; + for attr in helper::take_item_pallet_attrs(&mut method.attrs)?.into_iter() { + match attr { + FunctionAttr::CallIndex(_) => { + call_idx_attrs.push(attr); }, - ); + FunctionAttr::Weight(_) => { + weight_attrs.push(attr); + }, + FunctionAttr::FeelessIf(span, _) => { + feeless_attrs.push((span, attr)); + }, + } + } if weight_attrs.is_empty() && dev_mode { // inject a default O(1) weight when dev mode is enabled and no weight has @@ -323,6 +354,73 @@ impl CallDef { let docs = get_doc_literals(&method.attrs); + if feeless_attrs.len() > 1 { + let msg = "Invalid pallet::call, there can only be one feeless_if attribute"; + return Err(syn::Error::new(feeless_attrs[1].0, msg)) + } + let feeless_check: Option = + 
feeless_attrs.pop().map(|(_, attr)| match attr { + FunctionAttr::FeelessIf(_, closure) => closure, + _ => unreachable!("checked during creation of the let binding"), + }); + + if let Some(ref feeless_check) = feeless_check { + if feeless_check.inputs.len() != args.len() + 1 { + let msg = "Invalid pallet::call, feeless_if closure must have same \ + number of arguments as the dispatchable function"; + return Err(syn::Error::new(feeless_check.span(), msg)) + } + + match feeless_check.inputs.first() { + None => { + let msg = "Invalid pallet::call, feeless_if closure must have at least origin arg"; + return Err(syn::Error::new(feeless_check.span(), msg)) + }, + Some(syn::Pat::Type(arg)) => { + check_dispatchable_first_arg_type(&arg.ty, true)?; + }, + _ => { + let msg = "Invalid pallet::call, feeless_if closure first argument must be a typed argument, \ + e.g. `origin: OriginFor`"; + return Err(syn::Error::new(feeless_check.span(), msg)) + }, + } + + for (feeless_arg, arg) in feeless_check.inputs.iter().skip(1).zip(args.iter()) { + let feeless_arg_type = + if let syn::Pat::Type(syn::PatType { ty, .. }) = feeless_arg.clone() { + if let syn::Type::Reference(pat) = *ty { + pat.elem.clone() + } else { + let msg = "Invalid pallet::call, feeless_if closure argument must be a reference"; + return Err(syn::Error::new(ty.span(), msg)) + } + } else { + let msg = "Invalid pallet::call, feeless_if closure argument must be a type ascription pattern"; + return Err(syn::Error::new(feeless_arg.span(), msg)) + }; + + if feeless_arg_type != arg.2 { + let msg = + "Invalid pallet::call, feeless_if closure argument must have \ + a reference to the same type as the dispatchable function argument"; + return Err(syn::Error::new(feeless_arg.span(), msg)) + } + } + + let valid_return = match &feeless_check.output { + syn::ReturnType::Type(_, type_) => match *(type_.clone()) { + syn::Type::Path(syn::TypePath { path, .. }) => path.is_ident("bool"), + _ => false, + }, + _ => false, + }; + if !valid_return { + let msg = "Invalid pallet::call, feeless_if closure must return `bool`"; + return Err(syn::Error::new(feeless_check.output.span(), msg)) + } + } + methods.push(CallVariantDef { name: method.sig.ident.clone(), weight, @@ -331,6 +429,7 @@ impl CallDef { args, docs, attrs: method.attrs.clone(), + feeless_check, }); } else { let msg = "Invalid pallet::call, only method accepted"; diff --git a/substrate/frame/support/src/dispatch.rs b/substrate/frame/support/src/dispatch.rs index e6a090ebcae..e57227f9b40 100644 --- a/substrate/frame/support/src/dispatch.rs +++ b/substrate/frame/support/src/dispatch.rs @@ -54,6 +54,20 @@ pub trait Callable { // https://github.com/rust-lang/rust/issues/51331 pub type CallableCallFor = >::RuntimeCall; +/// Means to checks if the dispatchable is feeless. +/// +/// This is automatically implemented for all dispatchables during pallet expansion. +/// If a call is marked by [`#[pallet::feeless_if]`](`macro@frame_support_procedural::feeless_if`) +/// attribute, the corresponding closure is checked. +pub trait CheckIfFeeless { + /// The Origin type of the runtime. + type Origin; + + /// Checks if the dispatchable satisfies the feeless condition as defined by + /// [`#[pallet::feeless_if]`](`macro@frame_support_procedural::feeless_if`) + fn is_feeless(&self, origin: &Self::Origin) -> bool; +} + /// Origin for the System pallet. 
#[derive(PartialEq, Eq, Clone, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] pub enum RawOrigin { diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index a01f3a01593..2ec3b24db0c 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -2227,9 +2227,10 @@ pub use frame_support_procedural::pallet; pub mod pallet_macros { pub use frame_support_procedural::{ call_index, compact, composite_enum, config, disable_frame_system_supertrait_check, error, - event, extra_constants, generate_deposit, generate_store, getter, hooks, import_section, - inherent, no_default, no_default_bounds, origin, pallet_section, storage_prefix, - storage_version, type_value, unbounded, validate_unsigned, weight, whitelist_storage, + event, extra_constants, feeless_if, generate_deposit, generate_store, getter, hooks, + import_section, inherent, no_default, no_default_bounds, origin, pallet_section, + storage_prefix, storage_version, type_value, unbounded, validate_unsigned, weight, + whitelist_storage, }; /// Allows you to define the genesis configuration for the pallet. diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg1.rs b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg1.rs new file mode 100644 index 00000000000..08aaf06a7ef --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg1.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
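For illustration only (this aside is not part of the patch): given the expansion logic above, the `CheckIfFeeless` implementation generated for an annotated call looks roughly like the sketch below. The pallet call `transfer` and its argument are hypothetical; the real output is produced by `expand_call` and emits one match arm per call variant, with a plain `false` for variants that carry no `feeless_if` attribute. The UI tests that follow exercise the closure-shape rules enforced in `parse/call.rs`.

```rust
// Rough sketch of the macro output for a hypothetical call
// `transfer(origin, amount: u32)` annotated with
// `#[pallet::feeless_if(|_origin: &OriginFor<T>, amount: &u32| -> bool { *amount == 0 })]`.
impl<T: Config> frame_support::dispatch::CheckIfFeeless for Call<T> {
    type Origin = frame_system::pallet_prelude::OriginFor<T>;

    fn is_feeless(&self, origin: &Self::Origin) -> bool {
        match *self {
            // The closure is invoked with the origin plus a reference to every
            // call argument, in declaration order.
            Self::transfer { ref amount } =>
                (|_origin: &frame_system::pallet_prelude::OriginFor<T>, amount: &u32| -> bool {
                    *amount == 0
                })(origin, amount),
            // Variants without a `feeless_if` attribute (and `__Ignore`) are never
            // feeless; the generated code spells out each arm explicitly.
            _ => false,
        }
    }
}
```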
+ +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call] + impl Pallet { + #[pallet::feeless_if(|| -> bool { true })] + pub fn foo(_: OriginFor) -> DispatchResult { Ok(()) } + } +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg1.stderr b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg1.stderr new file mode 100644 index 00000000000..9c13d59d793 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg1.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::call, feeless_if closure must have same number of arguments as the dispatchable function + --> tests/pallet_ui/call_feeless_invalid_closure_arg1.rs:31:24 + | +31 | #[pallet::feeless_if(|| -> bool { true })] + | ^ diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg2.rs b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg2.rs new file mode 100644 index 00000000000..b16b4b3ffd9 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg2.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call] + impl Pallet { + #[pallet::feeless_if(|_: bool| -> bool { true })] + pub fn foo(_: OriginFor) -> DispatchResult { Ok(()) } + } +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg2.stderr b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg2.stderr new file mode 100644 index 00000000000..1c38ec23683 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg2.stderr @@ -0,0 +1,5 @@ +error: Invalid type: expected `&OriginFor` + --> tests/pallet_ui/call_feeless_invalid_closure_arg2.rs:31:28 + | +31 | #[pallet::feeless_if(|_: bool| -> bool { true })] + | ^^^^ diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg3.rs b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg3.rs new file mode 100644 index 00000000000..5f2230744ff --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg3.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call] + impl Pallet { + #[pallet::feeless_if(|_: &OriginFor, _s: &u32| -> bool { true })] + pub fn foo(_: OriginFor, _something: u64) -> DispatchResult { Ok(()) } + } +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg3.stderr b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg3.stderr new file mode 100644 index 00000000000..1ad9588cead --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg3.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::call, feeless_if closure argument must have a reference to the same type as the dispatchable function argument + --> tests/pallet_ui/call_feeless_invalid_closure_arg3.rs:31:43 + | +31 | #[pallet::feeless_if(|_: &OriginFor, _s: &u32| -> bool { true })] + | ^^ diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_return.rs b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_return.rs new file mode 100644 index 00000000000..1f0399a123c --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_return.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call] + impl Pallet { + #[pallet::feeless_if(|_: &OriginFor| -> u32 { 0 })] + pub fn foo(_: OriginFor) -> DispatchResult { Ok(()) } + } +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_return.stderr b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_return.stderr new file mode 100644 index 00000000000..a8c05242bde --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_return.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::call, feeless_if closure must return `bool` + --> tests/pallet_ui/call_feeless_invalid_closure_return.rs:31:43 + | +31 | #[pallet::feeless_if(|_: &OriginFor| -> u32 { 0 })] + | ^ diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_type.rs b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_type.rs new file mode 100644 index 00000000000..26bd8a600ab --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_type.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call] + impl Pallet { + #[pallet::feeless_if(0)] + pub fn foo(_: OriginFor) -> DispatchResult { Ok(()) } + } +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_type.stderr b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_type.stderr new file mode 100644 index 00000000000..add3decbf16 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_type.stderr @@ -0,0 +1,11 @@ +error: Invalid feeless_if attribute: expected a closure + --> tests/pallet_ui/call_feeless_invalid_type.rs:31:24 + | +31 | #[pallet::feeless_if(0)] + | ^ + +error: expected `|` + --> tests/pallet_ui/call_feeless_invalid_type.rs:31:24 + | +31 | #[pallet::feeless_if(0)] + | ^ diff --git a/substrate/frame/support/test/tests/pallet_ui/call_invalid_attr.stderr b/substrate/frame/support/test/tests/pallet_ui/call_invalid_attr.stderr index eec5e33ccbd..1809fcb6ed9 100644 --- a/substrate/frame/support/test/tests/pallet_ui/call_invalid_attr.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/call_invalid_attr.stderr @@ -1,4 +1,4 @@ -error: expected `weight` or `call_index` +error: expected one of: `weight`, `call_index`, `feeless_if` --> tests/pallet_ui/call_invalid_attr.rs:31:13 | 31 | #[pallet::weird_attr] diff --git a/substrate/frame/support/test/tests/pallet_ui/call_invalid_origin_type.stderr b/substrate/frame/support/test/tests/pallet_ui/call_invalid_origin_type.stderr index 99146c0563a..c04729a2438 100644 --- a/substrate/frame/support/test/tests/pallet_ui/call_invalid_origin_type.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/call_invalid_origin_type.stderr @@ -3,9 +3,3 @@ error: Invalid type: expected `OriginFor` | 34 | pub fn foo(origin: u8) {} | ^^ - -error: expected `OriginFor` - --> tests/pallet_ui/call_invalid_origin_type.rs:34:22 - | -34 | pub fn foo(origin: u8) {} - | ^^ diff --git a/substrate/frame/support/test/tests/pallet_ui/pass/feeless_call.rs b/substrate/frame/support/test/tests/pallet_ui/pass/feeless_call.rs new file mode 100644 index 00000000000..566b7c65cc7 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/pass/feeless_call.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call] + impl Pallet { + #[pallet::feeless_if(|_: &OriginFor| -> bool { true })] + pub fn foo(_: OriginFor) -> DispatchResult { Ok(()) } + } +} + +fn main() { +} diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/Cargo.toml b/substrate/frame/transaction-payment/skip-feeless-payment/Cargo.toml new file mode 100644 index 00000000000..cfb814e2e38 --- /dev/null +++ b/substrate/frame/transaction-payment/skip-feeless-payment/Cargo.toml @@ -0,0 +1,44 @@ +[package] +name = "pallet-skip-feeless-payment" +version = "1.0.0-dev" +authors.workspace = true +edition.workspace = true +license.workspace = true +repository.workspace = true +description = "Pallet to skip payments for calls annotated with `feeless_if` if the respective conditions are satisfied." + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +# Substrate dependencies +sp-runtime = { path = "../../../primitives/runtime", default-features = false} +sp-std = { path = "../../../primitives/std", default-features = false} + +frame-support = { path = "../../support", default-features = false} +frame-system = { path = "../../system", default-features = false} + +# Other dependencies +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } + +[features] +default = [ "std" ] +std = [ + "codec/std", + "frame-support/std", + "frame-system/std", + "scale-info/std", + "sp-runtime/std", + "sp-std/std", +] +runtime-benchmarks = [ + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "sp-runtime/try-runtime", +] diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs new file mode 100644 index 00000000000..923c7e7ebc2 --- /dev/null +++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs @@ -0,0 +1,145 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +//! # Skip Feeless Payment Pallet +//! +//! This pallet allows runtimes that include it to skip payment of transaction fees for +//! dispatchables marked by [`#[pallet::feeless_if]`](`macro@ +//! frame_support::pallet_prelude::feeless_if`). +//! +//! ## Overview +//! +//! It does this by wrapping an existing [`SignedExtension`] implementation (e.g. +//! [`pallet-transaction-payment`]) and checking if the dispatchable is feeless before applying the +//! wrapped extension. 
If the dispatchable is indeed feeless, the extension is skipped and a custom +//! event is emitted instead. Otherwise, the extension is applied as usual. +//! +//! +//! ## Integration +//! +//! This pallet wraps an existing transaction payment pallet. This means you should both pallets +//! in your `construct_runtime` macro and include this pallet's +//! [`SignedExtension`] ([`SkipCheckIfFeeless`]) that would accept the existing one as an argument. + +#![cfg_attr(not(feature = "std"), no_std)] + +use codec::{Decode, Encode}; +use frame_support::{ + dispatch::{CheckIfFeeless, DispatchResult}, + traits::{IsType, OriginTrait}, +}; +use scale_info::TypeInfo; +use sp_runtime::{ + traits::{DispatchInfoOf, PostDispatchInfoOf, SignedExtension}, + transaction_validity::TransactionValidityError, +}; + +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; + +pub use pallet::*; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + } + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// A transaction fee was skipped. + FeeSkipped { who: T::AccountId }, + } +} + +/// A [`SignedExtension`] that skips the wrapped extension if the dispatchable is feeless. +#[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] +#[scale_info(skip_type_params(T))] +pub struct SkipCheckIfFeeless(pub S, sp_std::marker::PhantomData); + +impl sp_std::fmt::Debug for SkipCheckIfFeeless { + #[cfg(feature = "std")] + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "SkipCheckIfFeeless<{:?}>", self.0.encode()) + } + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } +} + +impl SkipCheckIfFeeless { + /// utility constructor. Used only in client/factory code. + pub fn from(s: S) -> Self { + Self(s, sp_std::marker::PhantomData) + } +} + +impl> SignedExtension + for SkipCheckIfFeeless +where + S::Call: CheckIfFeeless>, +{ + type AccountId = T::AccountId; + type Call = S::Call; + type AdditionalSigned = S::AdditionalSigned; + type Pre = (Self::AccountId, Option<::Pre>); + const IDENTIFIER: &'static str = "SkipCheckIfFeeless"; + + fn additional_signed(&self) -> Result { + self.0.additional_signed() + } + + fn pre_dispatch( + self, + who: &Self::AccountId, + call: &Self::Call, + info: &DispatchInfoOf, + len: usize, + ) -> Result { + if call.is_feeless(&::RuntimeOrigin::signed(who.clone())) { + Ok((who.clone(), None)) + } else { + Ok((who.clone(), Some(self.0.pre_dispatch(who, call, info, len)?))) + } + } + + fn post_dispatch( + pre: Option, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, + len: usize, + result: &DispatchResult, + ) -> Result<(), TransactionValidityError> { + if let Some(pre) = pre { + if let Some(pre) = pre.1 { + S::post_dispatch(Some(pre), info, post_info, len, result)?; + } else { + Pallet::::deposit_event(Event::::FeeSkipped { who: pre.0 }); + } + } + Ok(()) + } +} diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs new file mode 100644 index 00000000000..5c540c3e459 --- /dev/null +++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs @@ -0,0 +1,92 @@ +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::*; +use crate as pallet_skip_feeless_payment; + +use frame_support::{derive_impl, parameter_types}; +use frame_system as system; + +type Block = frame_system::mocking::MockBlock; +type AccountId = u64; + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Runtime { + type Block = Block; +} + +impl Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} + +parameter_types! { + pub static PreDispatchCount: u32 = 0; +} + +#[derive(Clone, Eq, PartialEq, Debug, Encode, Decode, TypeInfo)] +pub struct DummyExtension; + +impl SignedExtension for DummyExtension { + type AccountId = AccountId; + type Call = RuntimeCall; + type AdditionalSigned = (); + type Pre = (); + const IDENTIFIER: &'static str = "DummyExtension"; + fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { + Ok(()) + } + fn pre_dispatch( + self, + _who: &Self::AccountId, + _call: &Self::Call, + _info: &DispatchInfoOf, + _len: usize, + ) -> Result { + PreDispatchCount::mutate(|c| *c += 1); + Ok(()) + } +} + +#[frame_support::pallet(dev_mode)] +pub mod pallet_dummy { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::call] + impl Pallet { + #[pallet::feeless_if(|_origin: &OriginFor, data: &u32| -> bool { + *data == 0 + })] + pub fn aux(_origin: OriginFor, #[pallet::compact] _data: u32) -> DispatchResult { + unreachable!() + } + } +} + +impl pallet_dummy::Config for Runtime {} + +frame_support::construct_runtime!( + pub struct Runtime { + System: system, + SkipFeeless: pallet_skip_feeless_payment, + DummyPallet: pallet_dummy, + } +); diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs new file mode 100644 index 00000000000..4b4dd699741 --- /dev/null +++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs @@ -0,0 +1,33 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
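For illustration only (not part of the patch): the mock above exercises the extension directly, but in a real runtime the integration described in the crate docs amounts to wrapping the existing fee-charging extension inside `SkipCheckIfFeeless` in the `SignedExtra` tuple, together with a `Config` impl and a `construct_runtime!` entry for this pallet, just as the mock shows. A hedged sketch, where everything except the two extension types is ordinary runtime boilerplate assumed for the example:

```rust
// Hypothetical runtime wiring (sketch): calls whose `feeless_if` closure
// returns `true` skip the wrapped `ChargeTransactionPayment` extension.
pub type SignedExtra = (
    frame_system::CheckNonZeroSender<Runtime>,
    frame_system::CheckSpecVersion<Runtime>,
    frame_system::CheckTxVersion<Runtime>,
    frame_system::CheckGenesis<Runtime>,
    frame_system::CheckEra<Runtime>,
    frame_system::CheckNonce<Runtime>,
    frame_system::CheckWeight<Runtime>,
    // The wrapped extension is the second type parameter; it is only applied
    // (and its fee charged) when the call is not feeless.
    pallet_skip_feeless_payment::SkipCheckIfFeeless<
        Runtime,
        pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
    >,
);
```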
+ +use super::*; +use crate::mock::{pallet_dummy::Call, DummyExtension, PreDispatchCount, Runtime, RuntimeCall}; +use frame_support::dispatch::DispatchInfo; + +#[test] +fn skip_feeless_payment_works() { + let call = RuntimeCall::DummyPallet(Call::::aux { data: 1 }); + SkipCheckIfFeeless::::from(DummyExtension) + .pre_dispatch(&0, &call, &DispatchInfo::default(), 0) + .unwrap(); + assert_eq!(PreDispatchCount::get(), 1); + + let call = RuntimeCall::DummyPallet(Call::::aux { data: 0 }); + SkipCheckIfFeeless::::from(DummyExtension) + .pre_dispatch(&0, &call, &DispatchInfo::default(), 0) + .unwrap(); + assert_eq!(PreDispatchCount::get(), 1); +} -- GitLab From 29654a4d7154296e8e6cb2a067896490d67d96a1 Mon Sep 17 00:00:00 2001 From: gupnik <17176722+gupnik@users.noreply.github.com> Date: Mon, 13 Nov 2023 19:56:33 +0530 Subject: [PATCH 10/74] Skip zombienet CI job until PolkadotJS includes `SkipCheckIfFeeless` extension (#2294) --- .gitlab/pipeline/zombienet/substrate.yml | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/.gitlab/pipeline/zombienet/substrate.yml b/.gitlab/pipeline/zombienet/substrate.yml index 9fb2f161ad7..9e14ebe0852 100644 --- a/.gitlab/pipeline/zombienet/substrate.yml +++ b/.gitlab/pipeline/zombienet/substrate.yml @@ -38,13 +38,14 @@ tags: - zombienet-polkadot-integration-test -zombienet-substrate-0000-block-building: - extends: - - .zombienet-substrate-common - script: - - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh - --local-dir="${LOCAL_DIR}/0000-block-building" - --test="block-building.zndsl" +# Skip this one until PolkadotJS includes `SkipCheckIfFeeless` extension +# zombienet-substrate-0000-block-building: +# extends: +# - .zombienet-substrate-common +# script: +# - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh +# --local-dir="${LOCAL_DIR}/0000-block-building" +# --test="block-building.zndsl" zombienet-substrate-0001-basic-warp-sync: extends: -- GitLab From 18257373b313fcfad4f7a2ab2cb02cb8bfc5b8f1 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Mon, 13 Nov 2023 17:16:55 +0200 Subject: [PATCH 11/74] pallet-xcm: enhance `reserve_transfer_assets` to support remote reserves (#1672) ## Motivation `pallet-xcm` is the main user-facing interface for XCM functionality, including assets manipulation functions like `teleportAssets()` and `reserve_transfer_assets()` calls. While `teleportAsset()` works both ways, `reserve_transfer_assets()` works only for sending reserve-based assets to a remote destination and beneficiary when the reserve is the _local chain_. ## Solution This PR enhances `pallet_xcm::(limited_)reserve_withdraw_assets` to support transfers when reserves are other chains. This will allow complete, **bi-directional** reserve-based asset transfers user stories using `pallet-xcm`. 
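For illustration only (not part of this patch): the extrinsic signature is unchanged, so a transfer whose reserve is a remote chain is submitted exactly like a local-reserve one; the pallet derives the right XCM programs from the configured trusted reserves and teleporters. A rough sketch of such a call from a parachain, with placeholder names (`PolkadotXcm`, `RuntimeOrigin`, `ALICE`, `BOB`) and an assumed asset location:

```rust
use xcm::prelude::*;

// Destination parachain and beneficiary account (placeholders).
let dest = MultiLocation::new(1, X1(Parachain(2000)));
let beneficiary: MultiLocation = AccountId32 { network: None, id: BOB.into() }.into();

// An asset whose reserve is a third chain (e.g. an asset on Asset Hub),
// being sent from this parachain to `dest`.
let asset: MultiAsset = (
    MultiLocation::new(1, X3(Parachain(1000), PalletInstance(50), GeneralIndex(1984))),
    100_000_000u128,
)
    .into();
let assets: MultiAssets = asset.into();

PolkadotXcm::limited_reserve_transfer_assets(
    RuntimeOrigin::signed(ALICE),
    Box::new(dest.into()),        // `VersionedMultiLocation`
    Box::new(beneficiary.into()), // `VersionedMultiLocation`
    Box::new(assets.into()),      // `VersionedMultiAssets`
    0,                            // index of the asset used to pay fees
    WeightLimit::Unlimited,
)
.expect("transfer parameters are valid");
```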
Enables following scenarios: - transferring assets with local reserve (was previously supported iff asset used as fee also had local reserve - now it works in all cases), - transferring assets with reserve on destination, - transferring assets with reserve on remote/third-party chain (iff assets and fees have same remote reserve), - transferring assets with reserve different than the reserve of the asset to be used as fees - meaning can be used to transfer random asset with local/dest reserve while using DOT for fees on all involved chains, even if DOT local/dest reserve doesn't match asset reserve, - transferring assets with any type of local/dest reserve while using fees which can be teleported between involved chains. All of the above is done by pallet inner logic without the user having to specify which scenario/reserves/teleports/etc. The correct scenario and corresponding XCM programs are identified, and respectively, built automatically based on runtime configuration of trusted teleporters and trusted reserves. #### Current limitations: - while `fees` and "non-fee" `assets` CAN have different reserves (or fees CAN be teleported), the remaining "non-fee" `assets` CANNOT, among themselves, have different reserve locations (this is also implicitly enforced by `MAX_ASSETS_FOR_TRANSFER=2`, but this can be safely increased in the future). - `fees` and "non-fee" `assets` CANNOT have **different remote** reserves (this could also be supported in the future, but adds even more complexity while possibly not being worth it - we'll see what the future holds). Fixes https://github.com/paritytech/polkadot-sdk/issues/1584 Fixes https://github.com/paritytech/polkadot-sdk/issues/2055 --------- Co-authored-by: Francisco Aguirre Co-authored-by: Branislav Kontur --- Cargo.lock | 5 + .../runtime/src/xcm_config.rs | 7 - .../assets/asset-hub-rococo/src/lib.rs | 6 +- .../assets/asset-hub-westend/src/lib.rs | 6 +- .../assets/asset-hub-wococo/src/lib.rs | 6 +- .../bridges/bridge-hub-rococo/src/lib.rs | 2 +- .../bridges/bridge-hub-westend/src/lib.rs | 2 +- .../bridges/bridge-hub-wococo/src/lib.rs | 2 +- .../parachains/testing/penpal/Cargo.toml | 1 + .../parachains/testing/penpal/src/lib.rs | 12 +- .../emulated/chains/relays/westend/src/lib.rs | 2 +- .../emulated/common/src/impls.rs | 14 +- .../tests/assets/asset-hub-rococo/Cargo.toml | 4 +- .../tests/assets/asset-hub-rococo/src/lib.rs | 25 +- .../src/tests/reserve_transfer.rs | 495 +++--- .../asset-hub-rococo/src/tests/teleport.rs | 18 +- .../tests/assets/asset-hub-westend/src/lib.rs | 4 +- .../src/tests/reserve_transfer.rs | 359 +---- .../asset-hub-westend/src/tests/teleport.rs | 2 +- .../assets/asset-hub-kusama/src/lib.rs | 36 +- .../assets/asset-hub-kusama/src/xcm_config.rs | 7 - .../assets/asset-hub-kusama/tests/tests.rs | 40 +- .../assets/asset-hub-polkadot/src/lib.rs | 36 +- .../asset-hub-polkadot/src/xcm_config.rs | 7 - .../assets/asset-hub-polkadot/tests/tests.rs | 40 +- .../assets/asset-hub-rococo/src/lib.rs | 36 +- .../assets/asset-hub-rococo/src/xcm_config.rs | 7 - .../assets/asset-hub-rococo/tests/tests.rs | 35 +- .../assets/asset-hub-westend/src/lib.rs | 36 +- .../asset-hub-westend/src/xcm_config.rs | 7 - .../assets/asset-hub-westend/tests/tests.rs | 35 +- .../runtimes/assets/test-utils/src/lib.rs | 15 + .../assets/test-utils/src/test_cases.rs | 288 +++- .../test-utils/src/test_cases_over_bridge.rs | 88 +- .../parachains/runtimes/bridge-hubs/README.md | 16 +- .../bridge-hubs/bridge-hub-kusama/src/lib.rs | 26 +- 
.../bridge-hub-kusama/src/xcm_config.rs | 7 - .../bridge-hub-kusama/tests/tests.rs | 6 - .../bridge-hub-polkadot/src/lib.rs | 26 +- .../bridge-hub-polkadot/src/xcm_config.rs | 7 - .../bridge-hub-polkadot/tests/tests.rs | 6 - .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 26 +- .../bridge-hub-rococo/src/xcm_config.rs | 7 - .../bridge-hub-rococo/tests/tests.rs | 12 - .../bridge-hubs/bridge-hub-westend/src/lib.rs | 26 +- .../bridge-hub-westend/src/xcm_config.rs | 7 - .../bridge-hub-westend/tests/tests.rs | 6 - .../collectives-polkadot/src/lib.rs | 26 +- .../collectives-polkadot/src/xcm_config.rs | 7 - .../contracts/contracts-rococo/src/lib.rs | 27 +- .../contracts-rococo/src/xcm_config.rs | 7 - .../runtimes/testing/penpal/src/lib.rs | 11 +- .../runtimes/testing/penpal/src/xcm_config.rs | 84 +- .../testing/rococo-parachain/src/lib.rs | 7 - cumulus/scripts/bridges_rococo_westend.sh | 28 +- cumulus/xcm/xcm-emulator/src/lib.rs | 8 +- polkadot/runtime/parachains/src/paras/mod.rs | 2 +- polkadot/runtime/rococo/src/lib.rs | 32 +- polkadot/runtime/rococo/src/xcm_config.rs | 7 - .../runtime/test-runtime/src/xcm_config.rs | 7 - polkadot/runtime/westend/src/lib.rs | 28 +- polkadot/runtime/westend/src/xcm_config.rs | 7 - polkadot/xcm/pallet-xcm/Cargo.toml | 10 +- polkadot/xcm/pallet-xcm/src/benchmarking.rs | 165 +- polkadot/xcm/pallet-xcm/src/lib.rs | 551 ++++++- polkadot/xcm/pallet-xcm/src/mock.rs | 203 ++- .../pallet-xcm/src/tests/assets_transfer.rs | 1405 +++++++++++++++++ .../pallet-xcm/src/{tests.rs => tests/mod.rs} | 375 +---- polkadot/xcm/src/v3/multilocation.rs | 66 + polkadot/xcm/xcm-builder/src/barriers.rs | 13 +- .../xcm/xcm-builder/src/tests/pay/mock.rs | 7 - polkadot/xcm/xcm-builder/tests/mock/mod.rs | 7 - polkadot/xcm/xcm-executor/Cargo.toml | 2 + polkadot/xcm/xcm-executor/src/lib.rs | 8 +- .../xcm-executor/src/traits/asset_transfer.rs | 90 ++ polkadot/xcm/xcm-executor/src/traits/mod.rs | 6 +- .../xcm-simulator/example/src/parachain.rs | 7 - .../xcm-simulator/example/src/relay_chain.rs | 7 - .../xcm/xcm-simulator/fuzzer/src/parachain.rs | 7 - .../xcm-simulator/fuzzer/src/relay_chain.rs | 7 - 80 files changed, 3650 insertions(+), 1437 deletions(-) create mode 100644 polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs rename polkadot/xcm/pallet-xcm/src/{tests.rs => tests/mod.rs} (68%) create mode 100644 polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs diff --git a/Cargo.lock b/Cargo.lock index dbdc2f856c4..4a45d5c602e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -901,9 +901,11 @@ dependencies = [ "pallet-asset-conversion", "pallet-assets", "pallet-balances", + "pallet-message-queue", "pallet-xcm", "parachains-common", "parity-scale-codec", + "penpal-runtime", "rococo-runtime", "rococo-system-emulated-network", "sp-runtime", @@ -11183,6 +11185,7 @@ dependencies = [ "frame-support", "frame-system", "log", + "pallet-assets", "pallet-balances", "parity-scale-codec", "polkadot-parachain-primitives", @@ -11630,6 +11633,7 @@ dependencies = [ "frame-support", "parachains-common", "penpal-runtime", + "rococo-emulated-chain", "serde_json", "sp-core", "sp-runtime", @@ -18416,6 +18420,7 @@ dependencies = [ "impl-trait-for-tuples", "log", "parity-scale-codec", + "scale-info", "sp-arithmetic", "sp-core", "sp-io", diff --git a/cumulus/parachain-template/runtime/src/xcm_config.rs b/cumulus/parachain-template/runtime/src/xcm_config.rs index 353f68d22e3..752137c96f1 100644 --- a/cumulus/parachain-template/runtime/src/xcm_config.rs +++ b/cumulus/parachain-template/runtime/src/xcm_config.rs @@ -150,11 
+150,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = EnsureXcmOrigin; @@ -180,8 +175,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs index 0580d61eae9..f94c4c3d255 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs @@ -21,7 +21,7 @@ use frame_support::traits::OnInitialize; // Cumulus use emulated_integration_tests_common::{ impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, - impl_assets_helpers_for_system_parachain, xcm_emulator::decl_test_parachains, + impl_assets_helpers_for_parachain, xcm_emulator::decl_test_parachains, }; use rococo_emulated_chain::Rococo; @@ -51,5 +51,5 @@ decl_test_parachains! { // AssetHubRococo implementation impl_accounts_helpers_for_parachain!(AssetHubRococo); -impl_assert_events_helpers_for_parachain!(AssetHubRococo); -impl_assets_helpers_for_system_parachain!(AssetHubRococo, Rococo); +impl_assert_events_helpers_for_parachain!(AssetHubRococo, false); +impl_assets_helpers_for_parachain!(AssetHubRococo, Rococo); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs index 804b727c33f..73d777247a5 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs @@ -21,7 +21,7 @@ use frame_support::traits::OnInitialize; // Cumulus use emulated_integration_tests_common::{ impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, - impl_assets_helpers_for_system_parachain, xcm_emulator::decl_test_parachains, + impl_assets_helpers_for_parachain, xcm_emulator::decl_test_parachains, }; use westend_emulated_chain::Westend; @@ -51,5 +51,5 @@ decl_test_parachains! 
{ // AssetHubWestend implementation impl_accounts_helpers_for_parachain!(AssetHubWestend); -impl_assert_events_helpers_for_parachain!(AssetHubWestend); -impl_assets_helpers_for_system_parachain!(AssetHubWestend, Westend); +impl_assert_events_helpers_for_parachain!(AssetHubWestend, false); +impl_assets_helpers_for_parachain!(AssetHubWestend, Westend); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/src/lib.rs index 677ca1763cf..38a6ece3472 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/src/lib.rs @@ -19,7 +19,7 @@ use frame_support::traits::OnInitialize; // Cumulus use emulated_integration_tests_common::{ impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, - impl_assets_helpers_for_system_parachain, xcm_emulator::decl_test_parachains, + impl_assets_helpers_for_parachain, xcm_emulator::decl_test_parachains, }; use wococo_emulated_chain::Wococo; @@ -49,5 +49,5 @@ decl_test_parachains! { // AssetHubWococo implementation impl_accounts_helpers_for_parachain!(AssetHubWococo); -impl_assert_events_helpers_for_parachain!(AssetHubWococo); -impl_assets_helpers_for_system_parachain!(AssetHubWococo, Wococo); +impl_assert_events_helpers_for_parachain!(AssetHubWococo, false); +impl_assets_helpers_for_parachain!(AssetHubWococo, Wococo); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs index d7630954c86..f4557021f62 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs @@ -46,4 +46,4 @@ decl_test_parachains! { // BridgeHubRococo implementation impl_accounts_helpers_for_parachain!(BridgeHubRococo); -impl_assert_events_helpers_for_parachain!(BridgeHubRococo); +impl_assert_events_helpers_for_parachain!(BridgeHubRococo, false); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs index 436b65cb916..1f1126d4565 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs @@ -46,4 +46,4 @@ decl_test_parachains! { // BridgeHubWestend implementation impl_accounts_helpers_for_parachain!(BridgeHubWestend); -impl_assert_events_helpers_for_parachain!(BridgeHubWestend); +impl_assert_events_helpers_for_parachain!(BridgeHubWestend, false); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/src/lib.rs index 6807a2ab8c8..e643f104aa3 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/src/lib.rs @@ -44,4 +44,4 @@ decl_test_parachains! 
{ // BridgeHubWococo implementation impl_accounts_helpers_for_parachain!(BridgeHubWococo); -impl_assert_events_helpers_for_parachain!(BridgeHubWococo); +impl_assert_events_helpers_for_parachain!(BridgeHubWococo, false); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml index 42aaee3f102..c55b10d7180 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml @@ -22,3 +22,4 @@ parachains-common = { path = "../../../../../../../parachains/common" } cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } emulated-integration-tests-common = { path = "../../../../common", default-features = false } penpal-runtime = { path = "../../../../../../runtimes/testing/penpal" } +rococo-emulated-chain = { path = "../../../relays/rococo" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs index 8709d4e9196..537f96f45b4 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs @@ -21,8 +21,10 @@ use frame_support::traits::OnInitialize; // Cumulus use emulated_integration_tests_common::{ - impl_assert_events_helpers_for_parachain, xcm_emulator::decl_test_parachains, + impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, + impl_assets_helpers_for_parachain, xcm_emulator::decl_test_parachains, }; +use rococo_emulated_chain::Rococo; // Penpal Parachain declaration decl_test_parachains! { @@ -40,6 +42,7 @@ decl_test_parachains! { pallets = { PolkadotXcm: penpal_runtime::PolkadotXcm, Assets: penpal_runtime::Assets, + Balances: penpal_runtime::Balances, } }, pub struct PenpalB { @@ -56,10 +59,13 @@ decl_test_parachains! { pallets = { PolkadotXcm: penpal_runtime::PolkadotXcm, Assets: penpal_runtime::Assets, + Balances: penpal_runtime::Balances, } }, } // Penpal implementation -impl_assert_events_helpers_for_parachain!(PenpalA); -impl_assert_events_helpers_for_parachain!(PenpalB); +impl_accounts_helpers_for_parachain!(PenpalA); +impl_assets_helpers_for_parachain!(PenpalA, Rococo); +impl_assert_events_helpers_for_parachain!(PenpalA, true); +impl_assert_events_helpers_for_parachain!(PenpalB, true); diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs index af45d8db4e6..d4ba1b6cfe7 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs @@ -30,7 +30,7 @@ decl_test_relay_chains! 
{ on_init = (), runtime = westend_runtime, core = { - SovereignAccountOf: westend_runtime::xcm_config::LocationConverter, //TODO: rename to SovereignAccountOf, + SovereignAccountOf: westend_runtime::xcm_config::LocationConverter, }, pallets = { XcmPallet: westend_runtime::XcmPallet, diff --git a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs index 6c99c1614db..82f27b93200 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs @@ -399,7 +399,7 @@ macro_rules! impl_accounts_helpers_for_parachain { #[macro_export] macro_rules! impl_assert_events_helpers_for_parachain { - ( $chain:ident ) => { + ( $chain:ident, $ignore_weight:expr ) => { $crate::impls::paste::paste! { type [<$chain RuntimeEvent>] = <$chain as $crate::impls::Chain>::RuntimeEvent; @@ -412,7 +412,7 @@ macro_rules! impl_assert_events_helpers_for_parachain { [<$chain RuntimeEvent>]::::PolkadotXcm( $crate::impls::pallet_xcm::Event::Attempted { outcome: $crate::impls::Outcome::Complete(weight) } ) => { - weight: $crate::impls::weight_within_threshold( + weight: $ignore_weight || $crate::impls::weight_within_threshold( ($crate::impls::REF_TIME_THRESHOLD, $crate::impls::PROOF_SIZE_THRESHOLD), expected_weight.unwrap_or(*weight), *weight @@ -434,7 +434,7 @@ macro_rules! impl_assert_events_helpers_for_parachain { [<$chain RuntimeEvent>]::::PolkadotXcm( $crate::impls::pallet_xcm::Event::Attempted { outcome: $crate::impls::Outcome::Incomplete(weight, error) } ) => { - weight: $crate::impls::weight_within_threshold( + weight: $ignore_weight || $crate::impls::weight_within_threshold( ($crate::impls::REF_TIME_THRESHOLD, $crate::impls::PROOF_SIZE_THRESHOLD), expected_weight.unwrap_or(*weight), *weight @@ -490,7 +490,7 @@ macro_rules! impl_assert_events_helpers_for_parachain { [<$chain RuntimeEvent>]::::MessageQueue($crate::impls::pallet_message_queue::Event::Processed { success: true, weight_used: weight, .. }) => { - weight: $crate::impls::weight_within_threshold( + weight: $ignore_weight || $crate::impls::weight_within_threshold( ($crate::impls::REF_TIME_THRESHOLD, $crate::impls::PROOF_SIZE_THRESHOLD), expected_weight.unwrap_or(*weight), *weight @@ -510,7 +510,7 @@ macro_rules! impl_assert_events_helpers_for_parachain { [<$chain RuntimeEvent>]::::MessageQueue($crate::impls::pallet_message_queue::Event::Processed { success: false, weight_used: weight, .. }) => { - weight: $crate::impls::weight_within_threshold( + weight: $ignore_weight || $crate::impls::weight_within_threshold( ($crate::impls::REF_TIME_THRESHOLD, $crate::impls::PROOF_SIZE_THRESHOLD), expected_weight.unwrap_or(*weight), *weight @@ -541,7 +541,7 @@ macro_rules! impl_assert_events_helpers_for_parachain { vec![ [<$chain RuntimeEvent>]::::MessageQueue($crate::impls::pallet_message_queue::Event::Processed { success: true, weight_used: weight, .. } ) => { - weight: $crate::impls::weight_within_threshold( + weight: $ignore_weight || $crate::impls::weight_within_threshold( ($crate::impls::REF_TIME_THRESHOLD, $crate::impls::PROOF_SIZE_THRESHOLD), expected_weight.unwrap_or(*weight), *weight @@ -556,7 +556,7 @@ macro_rules! impl_assert_events_helpers_for_parachain { } #[macro_export] -macro_rules! impl_assets_helpers_for_system_parachain { +macro_rules! impl_assets_helpers_for_parachain { ( $chain:ident, $relay_chain:ident ) => { $crate::impls::paste::paste! 
{ impl $chain { diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml index 6e592f04ba1..23f80f33f78 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml @@ -17,6 +17,7 @@ frame-support = { path = "../../../../../../../substrate/frame/support", default pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false} pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false} pallet-asset-conversion = { path = "../../../../../../../substrate/frame/asset-conversion", default-features = false} +pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue", default-features = false } # Polkadot xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false} @@ -28,5 +29,6 @@ rococo-runtime = { path = "../../../../../../../polkadot/runtime/rococo" } asset-test-utils = { path = "../../../../../runtimes/assets/test-utils" } parachains-common = { path = "../../../../../../parachains/common" } asset-hub-rococo-runtime = { path = "../../../../../runtimes/assets/asset-hub-rococo" } -emulated-integration-tests-common = { path = "../../../common", default-features = false} +emulated-integration-tests-common = { path = "../../../common", default-features = false } +penpal-runtime = { path = "../../../../../runtimes/testing/penpal" } rococo-system-emulated-network ={ path = "../../../networks/rococo-system" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs index 11380cd1e2d..3ff8c37c646 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs @@ -61,18 +61,20 @@ pub const ASSET_MIN_BALANCE: u128 = 1000; pub const ASSETS_PALLET_ID: u8 = 50; pub type RelayToSystemParaTest = Test; +pub type RelayToParaTest = Test; pub type SystemParaToRelayTest = Test; pub type SystemParaToParaTest = Test; +pub type ParaToSystemParaTest = Test; -/// Returns a `TestArgs` instance to de used for the Relay Chain accross integraton tests -pub fn relay_test_args(amount: Balance) -> TestArgs { +/// Returns a `TestArgs` instance to be used for the Relay Chain across integration tests +pub fn relay_test_args( + dest: MultiLocation, + beneficiary_id: AccountId32, + amount: Balance, +) -> TestArgs { TestArgs { - dest: Rococo::child_location_of(AssetHubRococo::para_id()), - beneficiary: AccountId32Junction { - network: None, - id: AssetHubRococoReceiver::get().into(), - } - .into(), + dest, + beneficiary: AccountId32Junction { network: None, id: beneficiary_id.into() }.into(), amount, assets: (Here, amount).into(), asset_id: None, @@ -81,13 +83,14 @@ pub fn relay_test_args(amount: Balance) -> TestArgs { } } -/// Returns a `TestArgs` instance to de used for the System Parachain accross integraton tests -pub fn system_para_test_args( +/// Returns a `TestArgs` instance to be used by parachains across integration tests +pub fn para_test_args( dest: MultiLocation, beneficiary_id: AccountId32, amount: Balance, assets: MultiAssets, asset_id: Option, + 
fee_asset_item: u32, ) -> TestArgs { TestArgs { dest, @@ -95,7 +98,7 @@ pub fn system_para_test_args( amount, assets, asset_id, - fee_asset_item: 0, + fee_asset_item, weight_limit: WeightLimit::Unlimited, } } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs index 76d93b2dbdb..d0e9b72176b 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs @@ -15,37 +15,45 @@ use crate::*; use asset_hub_rococo_runtime::xcm_config::XcmConfig as AssetHubRococoXcmConfig; +use penpal_runtime::xcm_config::XcmConfig as PenpalRococoXcmConfig; use rococo_runtime::xcm_config::XcmConfig as RococoXcmConfig; -fn relay_origin_assertions(t: RelayToSystemParaTest) { +fn relay_to_para_sender_assertions(t: RelayToParaTest) { type RuntimeEvent = ::RuntimeEvent; - Rococo::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(630_092_000, 6_196))); + Rococo::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(864_610_000, 8_799))); assert_expected_events!( Rococo, vec![ - // Amount to reserve transfer is transferred to System Parachain's Sovereign account - RuntimeEvent::Balances(pallet_balances::Event::Transfer { from, to, amount }) => { + // Amount to reserve transfer is transferred to Parachain's Sovereign account + RuntimeEvent::Balances( + pallet_balances::Event::Transfer { from, to, amount } + ) => { from: *from == t.sender.account_id, to: *to == Rococo::sovereign_account_id_of( t.args.dest ), - amount: *amount == t.args.amount, + amount: *amount == t.args.amount, }, ] ); } -fn system_para_dest_assertions_incomplete(_t: RelayToSystemParaTest) { - AssetHubRococo::assert_dmp_queue_incomplete(Some(Weight::from_parts(57_185_000, 3504))); -} - -fn system_para_to_relay_assertions(_t: SystemParaToRelayTest) { - AssetHubRococo::assert_xcm_pallet_attempted_error(Some(XcmError::Barrier)) +fn relay_to_para_receiver_assertions(_: Test) { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + PenpalA, + vec![ + RuntimeEvent::Balances(pallet_balances::Event::Deposit { .. }) => {}, + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); } -fn system_para_to_para_assertions(t: SystemParaToParaTest) { +fn system_para_to_para_sender_assertions(t: SystemParaToParaTest) { type RuntimeEvent = ::RuntimeEvent; AssetHubRococo::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts( @@ -56,7 +64,7 @@ fn system_para_to_para_assertions(t: SystemParaToParaTest) { assert_expected_events!( AssetHubRococo, vec![ - // Amount to reserve transfer is transferred to Parachain's Sovereing account + // Amount to reserve transfer is transferred to Parachain's Sovereign account RuntimeEvent::Balances( pallet_balances::Event::Transfer { from, to, amount } ) => { @@ -70,7 +78,64 @@ fn system_para_to_para_assertions(t: SystemParaToParaTest) { ); } -fn system_para_to_para_assets_assertions(t: SystemParaToParaTest) { +fn system_para_to_para_receiver_assertions(_: Test) { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + PenpalA, + vec![ + RuntimeEvent::Balances(pallet_balances::Event::Deposit { .. }) => {}, + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. 
} + ) => {}, + ] + ); +} + +fn para_to_system_para_sender_assertions(t: ParaToSystemParaTest) { + type RuntimeEvent = ::RuntimeEvent; + + PenpalA::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(864_610_000, 8_799))); + + assert_expected_events!( + PenpalA, + vec![ + // Amount to reserve transfer is transferred to Parachain's Sovereign account + RuntimeEvent::Balances( + pallet_balances::Event::Withdraw { who, amount } + ) => { + who: *who == t.sender.account_id, + amount: *amount == t.args.amount, + }, + ] + ); +} + +fn para_to_system_para_receiver_assertions(t: ParaToSystemParaTest) { + type RuntimeEvent = ::RuntimeEvent; + + let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of( + AssetHubRococo::sibling_location_of(PenpalA::para_id()), + ); + + assert_expected_events!( + AssetHubRococo, + vec![ + // Amount to reserve transfer is transferred to Parachain's Sovereign account + RuntimeEvent::Balances( + pallet_balances::Event::Withdraw { who, amount } + ) => { + who: *who == sov_penpal_on_ahr.clone().into(), + amount: *amount == t.args.amount, + }, + RuntimeEvent::Balances(pallet_balances::Event::Deposit { .. }) => {}, + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); +} + +fn system_para_to_para_assets_sender_assertions(t: SystemParaToParaTest) { type RuntimeEvent = ::RuntimeEvent; AssetHubRococo::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts( @@ -81,7 +146,7 @@ fn system_para_to_para_assets_assertions(t: SystemParaToParaTest) { assert_expected_events!( AssetHubRococo, vec![ - // Amount to reserve transfer is transferred to Parachain's Sovereing account + // Amount to reserve transfer is transferred to Parachain's Sovereign account RuntimeEvent::Assets( pallet_assets::Event::Transferred { asset_id, from, to, amount } ) => { @@ -96,29 +161,22 @@ fn system_para_to_para_assets_assertions(t: SystemParaToParaTest) { ); } -fn relay_limited_reserve_transfer_assets(t: RelayToSystemParaTest) -> DispatchResult { - ::XcmPallet::limited_reserve_transfer_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - t.args.weight_limit, - ) -} - -fn relay_reserve_transfer_assets(t: RelayToSystemParaTest) -> DispatchResult { - ::XcmPallet::reserve_transfer_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - ) +fn system_para_to_para_assets_receiver_assertions(_: Test) { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + PenpalA, + vec![ + RuntimeEvent::Balances(pallet_balances::Event::Deposit { .. }) => {}, + RuntimeEvent::Assets(pallet_assets::Event::Issued { .. }) => {}, + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. 
} + ) => {}, + ] + ); } -fn system_para_limited_reserve_transfer_assets(t: SystemParaToRelayTest) -> DispatchResult { - ::PolkadotXcm::limited_reserve_transfer_assets( +fn relay_to_para_limited_reserve_transfer_assets(t: RelayToParaTest) -> DispatchResult { + ::XcmPallet::limited_reserve_transfer_assets( t.signed_origin, bx!(t.args.dest.into()), bx!(t.args.beneficiary.into()), @@ -128,16 +186,6 @@ fn system_para_limited_reserve_transfer_assets(t: SystemParaToRelayTest) -> Disp ) } -fn system_para_reserve_transfer_assets(t: SystemParaToRelayTest) -> DispatchResult { - ::PolkadotXcm::reserve_transfer_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - ) -} - fn system_para_to_para_limited_reserve_transfer_assets(t: SystemParaToParaTest) -> DispatchResult { ::PolkadotXcm::limited_reserve_transfer_assets( t.signed_origin, @@ -149,101 +197,108 @@ fn system_para_to_para_limited_reserve_transfer_assets(t: SystemParaToParaTest) ) } -fn system_para_to_para_reserve_transfer_assets(t: SystemParaToParaTest) -> DispatchResult { - ::PolkadotXcm::reserve_transfer_assets( +fn para_to_system_para_limited_reserve_transfer_assets(t: ParaToSystemParaTest) -> DispatchResult { + ::PolkadotXcm::limited_reserve_transfer_assets( t.signed_origin, bx!(t.args.dest.into()), bx!(t.args.beneficiary.into()), bx!(t.args.assets.into()), t.args.fee_asset_item, + t.args.weight_limit, ) } -/// Limited Reserve Transfers of native asset from Relay Chain to the System Parachain shouldn't -/// work +/// Reserve Transfers of native asset from Relay Chain to the System Parachain shouldn't work #[test] -fn limited_reserve_transfer_native_asset_from_relay_to_system_para_fails() { - // Init values for Relay Chain +fn reserve_transfer_native_asset_from_relay_to_system_para_fails() { + let signed_origin = ::RuntimeOrigin::signed(RococoSender::get().into()); + let destination = Rococo::child_location_of(AssetHubRococo::para_id()); + let beneficiary: MultiLocation = + AccountId32Junction { network: None, id: AssetHubRococoReceiver::get().into() }.into(); let amount_to_send: Balance = ROCOCO_ED * 1000; - let test_args = TestContext { - sender: RococoSender::get(), - receiver: AssetHubRococoReceiver::get(), - args: relay_test_args(amount_to_send), - }; - - let mut test = RelayToSystemParaTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(relay_origin_assertions); - test.set_assertion::(system_para_dest_assertions_incomplete); - test.set_dispatchable::(relay_limited_reserve_transfer_assets); - test.assert(); - - let delivery_fees = Rococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) + let assets: MultiAssets = (Here, amount_to_send).into(); + let fee_asset_item = 0; + + // this should fail + Rococo::execute_with(|| { + let result = ::XcmPallet::limited_reserve_transfer_assets( + signed_origin, + bx!(destination.into()), + bx!(beneficiary.into()), + bx!(assets.into()), + fee_asset_item, + WeightLimit::Unlimited, + ); + assert_err!( + result, + DispatchError::Module(sp_runtime::ModuleError { + index: 99, + error: [2, 0, 0, 0], + message: Some("Filtered") + }) + ); }); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - assert_eq!(sender_balance_before - 
amount_to_send - delivery_fees, sender_balance_after); - assert_eq!(receiver_balance_before, receiver_balance_after); } -/// Limited Reserve Transfers of native asset from System Parachain to Relay Chain shoudln't work +/// Reserve Transfers of native asset from System Parachain to Relay Chain shouldn't work #[test] -fn limited_reserve_transfer_native_asset_from_system_para_to_relay_fails() { +fn reserve_transfer_native_asset_from_system_para_to_relay_fails() { // Init values for System Parachain + let signed_origin = + ::RuntimeOrigin::signed(AssetHubRococoSender::get().into()); let destination = AssetHubRococo::parent_location(); let beneficiary_id = RococoReceiver::get(); + let beneficiary: MultiLocation = + AccountId32Junction { network: None, id: beneficiary_id.into() }.into(); let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 1000; - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubRococoSender::get(), - receiver: RococoReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), - }; - - let mut test = SystemParaToRelayTest::new(test_args); - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(system_para_to_relay_assertions); - test.set_dispatchable::(system_para_limited_reserve_transfer_assets); - test.assert(); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - assert_eq!(sender_balance_before, sender_balance_after); - assert_eq!(receiver_balance_before, receiver_balance_after); + let assets: MultiAssets = (Parent, amount_to_send).into(); + let fee_asset_item = 0; + + // this should fail + AssetHubRococo::execute_with(|| { + let result = + ::PolkadotXcm::limited_reserve_transfer_assets( + signed_origin, + bx!(destination.into()), + bx!(beneficiary.into()), + bx!(assets.into()), + fee_asset_item, + WeightLimit::Unlimited, + ); + assert_err!( + result, + DispatchError::Module(sp_runtime::ModuleError { + index: 31, + error: [2, 0, 0, 0], + message: Some("Filtered") + }) + ); + }); } -/// Reserve Transfers of native asset from Relay Chain to the System Parachain shouldn't work +/// Reserve Transfers of native asset from Relay to Parachain should work #[test] -fn reserve_transfer_native_asset_from_relay_to_system_para_fails() { - // Init values for Relay Chain +fn reserve_transfer_native_asset_from_relay_to_para() { + // Init values for Relay + let destination = Rococo::child_location_of(PenpalA::para_id()); + let beneficiary_id = PenpalAReceiver::get(); let amount_to_send: Balance = ROCOCO_ED * 1000; + let test_args = TestContext { sender: RococoSender::get(), - receiver: AssetHubRococoReceiver::get(), - args: relay_test_args(amount_to_send), + receiver: PenpalAReceiver::get(), + args: relay_test_args(destination, beneficiary_id, amount_to_send), }; - let mut test = RelayToSystemParaTest::new(test_args); + let mut test = RelayToParaTest::new(test_args); let sender_balance_before = test.sender.balance; let receiver_balance_before = test.receiver.balance; - test.set_assertion::(relay_origin_assertions); - test.set_assertion::(system_para_dest_assertions_incomplete); - test.set_dispatchable::(relay_reserve_transfer_assets); + test.set_assertion::(relay_to_para_sender_assertions); + test.set_assertion::(relay_to_para_receiver_assertions); + test.set_dispatchable::(relay_to_para_limited_reserve_transfer_assets); test.assert(); let delivery_fees = 
Rococo::execute_with(|| { @@ -255,44 +310,15 @@ fn reserve_transfer_native_asset_from_relay_to_system_para_fails() { let sender_balance_after = test.sender.balance; let receiver_balance_after = test.receiver.balance; + // Sender's balance is reduced assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - assert_eq!(receiver_balance_before, receiver_balance_after); + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); } -/// Reserve Transfers of native asset from System Parachain to Relay Chain shouldn't work -#[test] -fn reserve_transfer_native_asset_from_system_para_to_relay_fails() { - // Init values for System Parachain - let destination = AssetHubRococo::parent_location(); - let beneficiary_id = RococoReceiver::get(); - let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 1000; - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubRococoSender::get(), - receiver: RococoReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), - }; - - let mut test = SystemParaToRelayTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(system_para_to_relay_assertions); - test.set_dispatchable::(system_para_reserve_transfer_assets); - test.assert(); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - assert_eq!(sender_balance_before, sender_balance_after); - assert_eq!(receiver_balance_before, receiver_balance_after); -} - -/// Limited Reserve Transfers of native asset from System Parachain to Parachain should work +/// Reserve Transfers of native asset from System Parachain to Parachain should work #[test] -fn limited_reserve_transfer_native_asset_from_system_para_to_para() { +fn reserve_transfer_native_asset_from_system_para_to_para() { // Init values for System Parachain let destination = AssetHubRococo::sibling_location_of(PenpalA::para_id()); let beneficiary_id = PenpalAReceiver::get(); @@ -302,20 +328,21 @@ fn limited_reserve_transfer_native_asset_from_system_para_to_para() { let test_args = TestContext { sender: AssetHubRococoSender::get(), receiver: PenpalAReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0), }; let mut test = SystemParaToParaTest::new(test_args); let sender_balance_before = test.sender.balance; + let receiver_balance_before = test.receiver.balance; - test.set_assertion::(system_para_to_para_assertions); - // TODO: Add assertion for Penpal runtime. 
Right now message is failing with - // `UntrustedReserveLocation` + test.set_assertion::(system_para_to_para_sender_assertions); + test.set_assertion::(system_para_to_para_receiver_assertions); test.set_dispatchable::(system_para_to_para_limited_reserve_transfer_assets); test.assert(); let sender_balance_after = test.sender.balance; + let receiver_balance_after = test.receiver.balance; let delivery_fees = AssetHubRococo::execute_with(|| { xcm_helpers::transfer_assets_delivery_fees::< @@ -323,117 +350,153 @@ fn limited_reserve_transfer_native_asset_from_system_para_to_para() { >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); + // Sender's balance is reduced assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // TODO: Check receiver balance when Penpal runtime is improved to propery handle reserve - // transfers + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); } -/// Reserve Transfers of native asset from System Parachain to Parachain should work +/// Reserve Transfers of native asset from Parachain to System Parachain should work #[test] -fn reserve_transfer_native_asset_from_system_para_to_para() { - // Init values for System Parachain - let destination = AssetHubRococo::sibling_location_of(PenpalA::para_id()); - let beneficiary_id = PenpalAReceiver::get(); +fn reserve_transfer_native_asset_from_para_to_system_para() { + // Init values for Penpal Parachain + let destination = PenpalA::sibling_location_of(AssetHubRococo::para_id()); + let beneficiary_id = AssetHubRococoReceiver::get(); let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 1000; let assets = (Parent, amount_to_send).into(); let test_args = TestContext { - sender: AssetHubRococoSender::get(), - receiver: PenpalAReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + sender: PenpalASender::get(), + receiver: AssetHubRococoReceiver::get(), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0), }; - let mut test = SystemParaToParaTest::new(test_args); + let mut test = ParaToSystemParaTest::new(test_args); let sender_balance_before = test.sender.balance; + let receiver_balance_before = test.receiver.balance; + + let penpal_location_as_seen_by_ahr = AssetHubRococo::sibling_location_of(PenpalA::para_id()); + let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of(penpal_location_as_seen_by_ahr); - test.set_assertion::(system_para_to_para_assertions); - // TODO: Add assertion for Penpal runtime. 
Right now message is failing with - // `UntrustedReserveLocation` - test.set_dispatchable::(system_para_to_para_reserve_transfer_assets); + // fund the Penpal's SA on AHR with the native tokens held in reserve + AssetHubRococo::fund_accounts(vec![(sov_penpal_on_ahr.into(), amount_to_send * 2)]); + + test.set_assertion::(para_to_system_para_sender_assertions); + test.set_assertion::(para_to_system_para_receiver_assertions); + test.set_dispatchable::(para_to_system_para_limited_reserve_transfer_assets); test.assert(); let sender_balance_after = test.sender.balance; + let receiver_balance_after = test.receiver.balance; - let delivery_fees = AssetHubRococo::execute_with(|| { + let delivery_fees = PenpalA::execute_with(|| { xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, + ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); + // Sender's balance is reduced assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // TODO: Check receiver balance when Penpal runtime is improved to propery handle reserve - // transfers + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); } -/// Limited Reserve Transfers of a local asset from System Parachain to Parachain should work +/// Reserve Transfers of a local asset and native asset from System Parachain to Parachain should +/// work #[test] -fn limited_reserve_transfer_asset_from_system_para_to_para() { - // Force create asset from Relay Chain and mint assets for System Parachain's sender account +fn reserve_transfer_assets_from_system_para_to_para() { + // Force create asset on AssetHubRococo and PenpalA from Relay Chain AssetHubRococo::force_create_and_mint_asset( ASSET_ID, ASSET_MIN_BALANCE, - true, + false, AssetHubRococoSender::get(), Some(Weight::from_parts(1_019_445_000, 200_000)), - ASSET_MIN_BALANCE * 1000000, + ASSET_MIN_BALANCE * 1_000_000, + ); + PenpalA::force_create_and_mint_asset( + ASSET_ID, + ASSET_MIN_BALANCE, + false, + PenpalASender::get(), + Some(Weight::from_parts(1_019_445_000, 200_000)), + 0, ); // Init values for System Parachain let destination = AssetHubRococo::sibling_location_of(PenpalA::para_id()); let beneficiary_id = PenpalAReceiver::get(); - let amount_to_send = ASSET_MIN_BALANCE * 1000; - let assets = - (X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), amount_to_send) - .into(); - - let system_para_test_args = TestContext { + let fee_amount_to_send = ASSET_HUB_ROCOCO_ED * 1000; + let asset_amount_to_send = ASSET_MIN_BALANCE * 1000; + let assets: MultiAssets = vec![ + (Parent, fee_amount_to_send).into(), + (X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), asset_amount_to_send) + .into(), + ] + .into(); + let fee_asset_index = assets + .inner() + .iter() + .position(|r| r == &(Parent, fee_amount_to_send).into()) + .unwrap() as u32; + + let para_test_args = TestContext { sender: AssetHubRococoSender::get(), receiver: PenpalAReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + args: para_test_args( + destination, + beneficiary_id, + asset_amount_to_send, + assets, + None, + fee_asset_index, + ), }; - let mut system_para_test = SystemParaToParaTest::new(system_para_test_args); + let mut test = SystemParaToParaTest::new(para_test_args); - system_para_test.set_assertion::(system_para_to_para_assets_assertions); - // TODO: Add assertions when Penpal is able to manage assets - 
system_para_test - .set_dispatchable::(system_para_to_para_limited_reserve_transfer_assets); - system_para_test.assert(); -} + // Create SA-of-Penpal-on-AHR with ED. + let penpal_location = AssetHubRococo::sibling_location_of(PenpalA::para_id()); + let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of(penpal_location); + AssetHubRococo::fund_accounts(vec![(sov_penpal_on_ahr.into(), ROCOCO_ED)]); -/// Reserve Transfers of a local asset from System Parachain to Parachain should work -#[test] -fn reserve_transfer_asset_from_system_para_to_para() { - // Force create asset from Relay Chain and mint assets for System Parachain's sender account - AssetHubRococo::force_create_and_mint_asset( - ASSET_ID, - ASSET_MIN_BALANCE, - true, - AssetHubRococoSender::get(), - Some(Weight::from_parts(1_019_445_000, 200_000)), - ASSET_MIN_BALANCE * 1000000, - ); + let sender_balance_before = test.sender.balance; + let receiver_balance_before = test.receiver.balance; - // Init values for System Parachain - let destination = AssetHubRococo::sibling_location_of(PenpalA::para_id()); - let beneficiary_id = PenpalAReceiver::get(); - let amount_to_send = ASSET_MIN_BALANCE * 1000; - let assets = - (X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), amount_to_send) - .into(); + let sender_assets_before = AssetHubRococo::execute_with(|| { + type Assets = ::Assets; + >::balance(ASSET_ID, &AssetHubRococoSender::get()) + }); + let receiver_assets_before = PenpalA::execute_with(|| { + type Assets = ::Assets; + >::balance(ASSET_ID, &PenpalAReceiver::get()) + }); - let system_para_test_args = TestContext { - sender: AssetHubRococoSender::get(), - receiver: PenpalAReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), - }; + test.set_assertion::(system_para_to_para_assets_sender_assertions); + test.set_assertion::(system_para_to_para_assets_receiver_assertions); + test.set_dispatchable::(system_para_to_para_limited_reserve_transfer_assets); + test.assert(); - let mut system_para_test = SystemParaToParaTest::new(system_para_test_args); + let sender_balance_after = test.sender.balance; + let receiver_balance_after = test.receiver.balance; + + // Sender's balance is reduced + assert!(sender_balance_after < sender_balance_before); + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); + + let sender_assets_after = AssetHubRococo::execute_with(|| { + type Assets = ::Assets; + >::balance(ASSET_ID, &AssetHubRococoSender::get()) + }); + let receiver_assets_after = PenpalA::execute_with(|| { + type Assets = ::Assets; + >::balance(ASSET_ID, &PenpalAReceiver::get()) + }); - system_para_test.set_assertion::(system_para_to_para_assets_assertions); - // TODO: Add assertions when Penpal is able to manage assets - system_para_test - .set_dispatchable::(system_para_to_para_reserve_transfer_assets); - system_para_test.assert(); + // Sender's balance is reduced + assert_eq!(sender_assets_before - asset_amount_to_send, sender_assets_after); + // Receiver's balance is increased + assert!(receiver_assets_after > receiver_assets_before); } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs index 0d2ca685247..f8017f7a1c5 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs +++ 
b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs @@ -51,7 +51,7 @@ fn relay_dest_assertions(t: SystemParaToRelayTest) { assert_expected_events!( Rococo, vec![ - // Amount is witdrawn from Relay Chain's `CheckAccount` + // Amount is withdrawn from Relay Chain's `CheckAccount` RuntimeEvent::Balances(pallet_balances::Event::Withdraw { who, amount }) => { who: *who == ::XcmPallet::check_account(), amount: *amount == t.args.amount, @@ -157,10 +157,12 @@ fn system_para_teleport_assets(t: SystemParaToRelayTest) -> DispatchResult { fn limited_teleport_native_assets_from_relay_to_system_para_works() { // Init values for Relay Chain let amount_to_send: Balance = ROCOCO_ED * 1000; + let dest = Rococo::child_location_of(AssetHubRococo::para_id()); + let beneficiary_id = AssetHubRococoReceiver::get(); let test_args = TestContext { sender: RococoSender::get(), receiver: AssetHubRococoReceiver::get(), - args: relay_test_args(amount_to_send), + args: relay_test_args(dest, beneficiary_id, amount_to_send), }; let mut test = RelayToSystemParaTest::new(test_args); @@ -204,7 +206,7 @@ fn limited_teleport_native_assets_back_from_system_para_to_relay_works() { let test_args = TestContext { sender: AssetHubRococoSender::get(), receiver: RococoReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0), }; let mut test = SystemParaToRelayTest::new(test_args); @@ -245,7 +247,7 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { let test_args = TestContext { sender: AssetHubRococoSender::get(), receiver: RococoReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0), }; let mut test = SystemParaToRelayTest::new(test_args); @@ -278,10 +280,12 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { fn teleport_native_assets_from_relay_to_system_para_works() { // Init values for Relay Chain let amount_to_send: Balance = ROCOCO_ED * 1000; + let dest = Rococo::child_location_of(AssetHubRococo::para_id()); + let beneficiary_id = AssetHubRococoReceiver::get(); let test_args = TestContext { sender: RococoSender::get(), receiver: AssetHubRococoReceiver::get(), - args: relay_test_args(amount_to_send), + args: relay_test_args(dest, beneficiary_id, amount_to_send), }; let mut test = RelayToSystemParaTest::new(test_args); @@ -325,7 +329,7 @@ fn teleport_native_assets_back_from_system_para_to_relay_works() { let test_args = TestContext { sender: AssetHubRococoSender::get(), receiver: RococoReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0), }; let mut test = SystemParaToRelayTest::new(test_args); @@ -366,7 +370,7 @@ fn teleport_native_assets_from_system_para_to_relay_fails() { let test_args = TestContext { sender: AssetHubRococoSender::get(), receiver: RococoReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0), }; let mut test = SystemParaToRelayTest::new(test_args); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs 
b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs index e52ad448c0b..83a867e6ae3 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs @@ -65,7 +65,7 @@ pub type RelayToSystemParaTest = Test; pub type SystemParaToRelayTest = Test; pub type SystemParaToParaTest = Test; -/// Returns a `TestArgs` instance to de used for the Relay Chain accross integraton tests +/// Returns a `TestArgs` instance to be used for the Relay Chain across integration tests pub fn relay_test_args(amount: Balance) -> TestArgs { TestArgs { dest: Westend::child_location_of(AssetHubWestend::para_id()), @@ -82,7 +82,7 @@ pub fn relay_test_args(amount: Balance) -> TestArgs { } } -/// Returns a `TestArgs` instance to de used for the System Parachain accross integraton tests +/// Returns a `TestArgs` instance to be used for the System Parachain across integration tests pub fn system_para_test_args( dest: MultiLocation, beneficiary_id: AccountId32, diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs index 19a203897ad..5b2c648b7b0 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs @@ -15,37 +15,8 @@ use crate::*; use asset_hub_westend_runtime::xcm_config::XcmConfig; -use westend_runtime::xcm_config::XcmConfig as WestendXcmConfig; -fn relay_origin_assertions(t: RelayToSystemParaTest) { - type RuntimeEvent = ::RuntimeEvent; - - Westend::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(629_384_000, 6_196))); - - assert_expected_events!( - Westend, - vec![ - // Amount to reserve transfer is transferred to System Parachain's Sovereign account - RuntimeEvent::Balances(pallet_balances::Event::Transfer { from, to, amount }) => { - from: *from == t.sender.account_id, - to: *to == Westend::sovereign_account_id_of( - t.args.dest - ), - amount: *amount == t.args.amount, - }, - ] - ); -} - -fn system_para_dest_assertions(_t: RelayToSystemParaTest) { - AssetHubWestend::assert_dmp_queue_incomplete(Some(Weight::from_parts(31_352_000, 1489))); -} - -fn system_para_to_relay_assertions(_t: SystemParaToRelayTest) { - AssetHubWestend::assert_xcm_pallet_attempted_error(Some(XcmError::Barrier)) -} - -fn system_para_to_para_assertions(t: SystemParaToParaTest) { +fn system_para_to_para_sender_assertions(t: SystemParaToParaTest) { type RuntimeEvent = ::RuntimeEvent; AssetHubWestend::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts( @@ -56,7 +27,7 @@ fn system_para_to_para_assertions(t: SystemParaToParaTest) { assert_expected_events!( AssetHubWestend, vec![ - // Amount to reserve transfer is transferred to Parachain's Sovereing account + // Amount to reserve transfer is transferred to Parachain's Sovereign account RuntimeEvent::Balances( pallet_balances::Event::Transfer { from, to, amount } ) => { @@ -70,6 +41,19 @@ fn system_para_to_para_assertions(t: SystemParaToParaTest) { ); } +fn para_receiver_assertions(_: Test) { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + PenpalA, + vec![ + RuntimeEvent::Balances(pallet_balances::Event::Deposit { .. 
}) => {}, + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); +} + fn system_para_to_para_assets_assertions(t: SystemParaToParaTest) { type RuntimeEvent = ::RuntimeEvent; @@ -81,7 +65,7 @@ fn system_para_to_para_assets_assertions(t: SystemParaToParaTest) { assert_expected_events!( AssetHubWestend, vec![ - // Amount to reserve transfer is transferred to Parachain's Sovereing account + // Amount to reserve transfer is transferred to Parachain's Sovereign account RuntimeEvent::Assets( pallet_assets::Event::Transferred { asset_id, from, to, amount } ) => { @@ -96,48 +80,6 @@ fn system_para_to_para_assets_assertions(t: SystemParaToParaTest) { ); } -fn relay_limited_reserve_transfer_assets(t: RelayToSystemParaTest) -> DispatchResult { - ::XcmPallet::limited_reserve_transfer_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - t.args.weight_limit, - ) -} - -fn relay_reserve_transfer_assets(t: RelayToSystemParaTest) -> DispatchResult { - ::XcmPallet::reserve_transfer_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - ) -} - -fn system_para_limited_reserve_transfer_assets(t: SystemParaToRelayTest) -> DispatchResult { - ::PolkadotXcm::limited_reserve_transfer_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - t.args.weight_limit, - ) -} - -fn system_para_reserve_transfer_assets(t: SystemParaToRelayTest) -> DispatchResult { - ::PolkadotXcm::reserve_transfer_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - ) -} - fn system_para_to_para_limited_reserve_transfer_assets(t: SystemParaToParaTest) -> DispatchResult { ::PolkadotXcm::limited_reserve_transfer_assets( t.signed_origin, @@ -149,187 +91,72 @@ fn system_para_to_para_limited_reserve_transfer_assets(t: SystemParaToParaTest) ) } -fn system_para_to_para_reserve_transfer_assets(t: SystemParaToParaTest) -> DispatchResult { - ::PolkadotXcm::reserve_transfer_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - ) -} - -/// Limited Reserve Transfers of native asset from Relay Chain to the System Parachain shouldn't -/// work -#[test] -fn limited_reserve_transfer_native_asset_from_relay_to_system_para_fails() { - // Init values for Relay Chain - let amount_to_send: Balance = WESTEND_ED * 1000; - let test_args = TestContext { - sender: WestendSender::get(), - receiver: AssetHubWestendReceiver::get(), - args: relay_test_args(amount_to_send), - }; - - let mut test = RelayToSystemParaTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(relay_origin_assertions); - test.set_assertion::(system_para_dest_assertions); - test.set_dispatchable::(relay_limited_reserve_transfer_assets); - test.assert(); - - let delivery_fees = Westend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - assert_eq!(sender_balance_before - 
amount_to_send - delivery_fees, sender_balance_after); - assert_eq!(receiver_balance_before, receiver_balance_after); -} - -/// Limited Reserve Transfers of native asset from System Parachain to Relay Chain shoudln't work -#[test] -fn limited_reserve_transfer_native_asset_from_system_para_to_relay_fails() { - // Init values for System Parachain - let destination = AssetHubWestend::parent_location(); - let beneficiary_id = WestendReceiver::get(); - let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000; - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubWestendSender::get(), - receiver: WestendReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), - }; - - let mut test = SystemParaToRelayTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(system_para_to_relay_assertions); - test.set_dispatchable::(system_para_limited_reserve_transfer_assets); - test.assert(); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - assert_eq!(sender_balance_before, sender_balance_after); - assert_eq!(receiver_balance_before, receiver_balance_after); -} - /// Reserve Transfers of native asset from Relay Chain to the System Parachain shouldn't work #[test] fn reserve_transfer_native_asset_from_relay_to_system_para_fails() { - // Init values for Relay Chain + let signed_origin = ::RuntimeOrigin::signed(WestendSender::get().into()); + let destination = Westend::child_location_of(AssetHubWestend::para_id()); + let beneficiary: MultiLocation = + AccountId32Junction { network: None, id: AssetHubWestendReceiver::get().into() }.into(); let amount_to_send: Balance = WESTEND_ED * 1000; - let test_args = TestContext { - sender: WestendSender::get(), - receiver: AssetHubWestendReceiver::get(), - args: relay_test_args(amount_to_send), - }; - - let mut test = RelayToSystemParaTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(relay_origin_assertions); - test.set_assertion::(system_para_dest_assertions); - test.set_dispatchable::(relay_reserve_transfer_assets); - test.assert(); - - let delivery_fees = Westend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) + let assets: MultiAssets = (Here, amount_to_send).into(); + let fee_asset_item = 0; + + // this should fail + Westend::execute_with(|| { + let result = ::XcmPallet::limited_reserve_transfer_assets( + signed_origin, + bx!(destination.into()), + bx!(beneficiary.into()), + bx!(assets.into()), + fee_asset_item, + WeightLimit::Unlimited, + ); + assert_err!( + result, + DispatchError::Module(sp_runtime::ModuleError { + index: 99, + error: [2, 0, 0, 0], + message: Some("Filtered") + }) + ); }); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - assert_eq!(receiver_balance_before, receiver_balance_after); } /// Reserve Transfers of native asset from System Parachain to Relay Chain shouldn't work #[test] fn reserve_transfer_native_asset_from_system_para_to_relay_fails() { // Init values for System Parachain + let signed_origin = + 
::RuntimeOrigin::signed(AssetHubWestendSender::get().into()); let destination = AssetHubWestend::parent_location(); let beneficiary_id = WestendReceiver::get(); + let beneficiary: MultiLocation = + AccountId32Junction { network: None, id: beneficiary_id.into() }.into(); let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000; - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubWestendSender::get(), - receiver: WestendReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), - }; - - let mut test = SystemParaToRelayTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(system_para_to_relay_assertions); - test.set_dispatchable::(system_para_reserve_transfer_assets); - test.assert(); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - assert_eq!(sender_balance_before, sender_balance_after); - assert_eq!(receiver_balance_before, receiver_balance_after); -} - -/// Limited Reserve Transfers of native asset from System Parachain to Parachain should work -#[test] -fn limited_reserve_transfer_native_asset_from_system_para_to_para() { - // Init values for System Parachain - let destination = AssetHubWestend::sibling_location_of(PenpalA::para_id()); - let beneficiary_id = PenpalAReceiver::get(); - let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000; - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubWestendSender::get(), - receiver: PenpalAReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), - }; - - let mut test = SystemParaToParaTest::new(test_args); - - let sender_balance_before = test.sender.balance; - - test.set_assertion::(system_para_to_para_assertions); - // TODO: Add assertion for Penpal runtime. 
Right now message is failing with - // `UntrustedReserveLocation` - test.set_dispatchable::(system_para_to_para_limited_reserve_transfer_assets); - test.assert(); - - let sender_balance_after = test.sender.balance; - - let delivery_fees = AssetHubWestend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::<::XcmSender>( - test.args.assets.clone(), - 0, - test.args.weight_limit, - test.args.beneficiary, - test.args.dest, - ) + let assets: MultiAssets = (Parent, amount_to_send).into(); + let fee_asset_item = 0; + + // this should fail + AssetHubWestend::execute_with(|| { + let result = + ::PolkadotXcm::limited_reserve_transfer_assets( + signed_origin, + bx!(destination.into()), + bx!(beneficiary.into()), + bx!(assets.into()), + fee_asset_item, + WeightLimit::Unlimited, + ); + assert_err!( + result, + DispatchError::Module(sp_runtime::ModuleError { + index: 31, + error: [2, 0, 0, 0], + message: Some("Filtered") + }) + ); }); - - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // TODO: Check receiver balance when Penpal runtime is improved to propery handle reserve - // transfers } /// Reserve Transfers of native asset from System Parachain to Parachain should work @@ -350,14 +177,15 @@ fn reserve_transfer_native_asset_from_system_para_to_para() { let mut test = SystemParaToParaTest::new(test_args); let sender_balance_before = test.sender.balance; + let receiver_balance_before = test.receiver.balance; - test.set_assertion::(system_para_to_para_assertions); - // TODO: Add assertion for Penpal runtime. Right now message is failing with - // `UntrustedReserveLocation` - test.set_dispatchable::(system_para_to_para_reserve_transfer_assets); + test.set_assertion::(system_para_to_para_sender_assertions); + test.set_assertion::(para_receiver_assertions); + test.set_dispatchable::(system_para_to_para_limited_reserve_transfer_assets); test.assert(); let sender_balance_after = test.sender.balance; + let receiver_balance_after = test.receiver.balance; let delivery_fees = AssetHubWestend::execute_with(|| { xcm_helpers::transfer_assets_delivery_fees::<::XcmSender>( @@ -369,45 +197,10 @@ fn reserve_transfer_native_asset_from_system_para_to_para() { ) }); + // Sender's balance is reduced assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // TODO: Check receiver balance when Penpal runtime is improved to propery handle reserve - // transfers -} - -/// Limited Reserve Transfers of a local asset from System Parachain to Parachain should work -#[test] -fn limited_reserve_transfer_asset_from_system_para_to_para() { - // Force create asset from Relay Chain and mint assets for System Parachain's sender account - AssetHubWestend::force_create_and_mint_asset( - ASSET_ID, - ASSET_MIN_BALANCE, - true, - AssetHubWestendSender::get(), - Some(Weight::from_parts(1_019_445_000, 200_000)), - ASSET_MIN_BALANCE * 1000000, - ); - - // Init values for System Parachain - let destination = AssetHubWestend::sibling_location_of(PenpalA::para_id()); - let beneficiary_id = PenpalAReceiver::get(); - let amount_to_send = ASSET_MIN_BALANCE * 1000; - let assets = - (X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), amount_to_send) - .into(); - - let system_para_test_args = TestContext { - sender: AssetHubWestendSender::get(), - receiver: PenpalAReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), - }; - - let mut system_para_test = 
SystemParaToParaTest::new(system_para_test_args); - - system_para_test.set_assertion::(system_para_to_para_assets_assertions); - // TODO: Add assertions when Penpal is able to manage assets - system_para_test - .set_dispatchable::(system_para_to_para_limited_reserve_transfer_assets); - system_para_test.assert(); + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); } /// Reserve Transfers of a local asset from System Parachain to Parachain should work @@ -442,6 +235,6 @@ fn reserve_transfer_asset_from_system_para_to_para() { system_para_test.set_assertion::(system_para_to_para_assets_assertions); // TODO: Add assertions when Penpal is able to manage assets system_para_test - .set_dispatchable::(system_para_to_para_reserve_transfer_assets); + .set_dispatchable::(system_para_to_para_limited_reserve_transfer_assets); system_para_test.assert(); } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs index 57e1b93f349..d618cd2fe04 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs @@ -51,7 +51,7 @@ fn relay_dest_assertions(t: SystemParaToRelayTest) { assert_expected_events!( Westend, vec![ - // Amount is witdrawn from Relay Chain's `CheckAccount` + // Amount is withdrawn from Relay Chain's `CheckAccount` RuntimeEvent::Balances(pallet_balances::Event::Withdraw { who, amount }) => { who: *who == ::XcmPallet::check_account(), amount: *amount == t.args.amount, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs index 52ad3241e51..e4ed77884bf 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs @@ -962,7 +962,7 @@ mod benches { [cumulus_pallet_xcmp_queue, XcmpQueue] [cumulus_pallet_dmp_queue, DmpQueue] // XCM - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -1200,6 +1200,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -1243,6 +1244,39 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between AH and Relay. 
+ Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // AH can reserve transfer native token to some random parachain. + let random_para_id = 43211234; + ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( + random_para_id.into() + ); + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + ParentThen(Parachain(random_para_id).into()).into(), + )) + } + } + use xcm::latest::prelude::*; use xcm_config::{KsmLocation, MaxAssetsIntoHolding}; use pallet_xcm_benchmarks::asset_instance_from; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/xcm_config.rs index d63c126f082..05262e07410 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/xcm_config.rs @@ -552,11 +552,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; // We want to disallow users sending (arbitrary) XCMs from this chain. @@ -586,8 +581,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/tests/tests.rs index 7d49b56e461..cdd4290770f 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/tests/tests.rs @@ -18,13 +18,14 @@ //! Tests for the Statemine (Kusama Assets Hub) chain. 
use asset_hub_kusama_runtime::xcm_config::{ - AssetFeeAsExistentialDepositMultiplierFeeCharger, KsmLocation, TrustBackedAssetsPalletLocation, + AssetFeeAsExistentialDepositMultiplierFeeCharger, KsmLocation, LocationToAccountId, + TrustBackedAssetsPalletLocation, }; pub use asset_hub_kusama_runtime::{ xcm_config::{CheckingAccount, ForeignCreatorsSovereignAccountOf, XcmConfig}, AllPalletsWithoutSystem, AssetDeposit, Assets, Balances, ExistentialDeposit, ForeignAssets, ForeignAssetsInstance, MetadataDepositBase, MetadataDepositPerByte, ParachainSystem, Runtime, - RuntimeCall, RuntimeEvent, SessionKeys, System, TrustBackedAssetsInstance, + RuntimeCall, RuntimeEvent, SessionKeys, System, TrustBackedAssetsInstance, XcmpQueue, }; use asset_test_utils::{CollatorSessionKeys, ExtBuilder}; use codec::{Decode, Encode}; @@ -518,12 +519,6 @@ asset_test_utils::include_teleports_for_native_asset_works!( _ => None, } }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), 1000 ); @@ -632,3 +627,32 @@ asset_test_utils::include_create_and_manage_foreign_assets_for_local_consensus_p assert_eq!(ForeignAssets::asset_ids().collect::>().len(), 1); }) ); + +#[test] +fn reserve_transfer_native_asset_to_non_teleport_para_works() { + asset_test_utils::test_cases::reserve_transfer_native_asset_to_non_teleport_para_works::< + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + ParachainSystem, + XcmpQueue, + LocationToAccountId, + >( + collator_session_keys(), + ExistentialDeposit::get(), + AccountId::from(ALICE), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), + _ => None, + } + }), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), + _ => None, + } + }), + WeightLimit::Unlimited, + ); +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs index 57a1150bc88..6f853b6f56e 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs @@ -868,7 +868,7 @@ mod benches { [cumulus_pallet_xcmp_queue, XcmpQueue] [cumulus_pallet_dmp_queue, DmpQueue] // XCM - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -1082,6 +1082,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -1124,6 +1125,39 @@ impl_runtime_apis! 
{ use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between AH and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // AH can reserve transfer native token to some random parachain. + let random_para_id = 43211234; + ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( + random_para_id.into() + ); + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + ParentThen(Parachain(random_para_id).into()).into(), + )) + } + } + use xcm::latest::prelude::*; use xcm_config::{DotLocation, MaxAssetsIntoHolding}; use pallet_xcm_benchmarks::asset_instance_from; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/xcm_config.rs index 6035789a1ae..b3c2ce4da76 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/xcm_config.rs @@ -476,11 +476,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; // We want to disallow users sending (arbitrary) XCMs from this chain. 
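The `xcm_config.rs` hunks here (and the matching ones for the other Asset Hub runtimes later in this patch) drop the benchmark-only `ReachableDest` parameter type together with the `type ReachableDest` item on `pallet_xcm::Config`; benchmark destinations are instead supplied by implementing `pallet_xcm::benchmarking::Config` in the runtime's benchmarking setup, as the `lib.rs` hunks above do. A minimal sketch of that replacement shape, assuming `Runtime`, `ParachainSystem`, `EXISTENTIAL_DEPOSIT` and the `xcm::latest::prelude` items are in scope:

```rust
// Sketch only, mirroring the `lib.rs` additions in this patch; in the real
// runtimes this impl lives inside the `runtime-benchmarks` dispatch code.
impl pallet_xcm::benchmarking::Config for Runtime {
    fn reachable_dest() -> Option<MultiLocation> {
        Some(Parent.into())
    }

    fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> {
        // The native token can be teleported between Asset Hub and the Relay.
        Some((
            MultiAsset { fun: Fungible(EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) },
            Parent.into(),
        ))
    }

    fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> {
        // Reserve-transfer the native token to an arbitrary sibling parachain,
        // opening an HRMP channel to it first so the benchmark can route the XCM.
        let random_para_id = 43211234;
        ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests(
            random_para_id.into(),
        );
        Some((
            MultiAsset { fun: Fungible(EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) },
            ParentThen(Parachain(random_para_id).into()).into(),
        ))
    }
}
```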
@@ -510,8 +505,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/tests/tests.rs index 7200ebc16a2..b7e44646ece 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/tests/tests.rs @@ -19,12 +19,13 @@ use asset_hub_polkadot_runtime::xcm_config::{ AssetFeeAsExistentialDepositMultiplierFeeCharger, CheckingAccount, DotLocation, - ForeignCreatorsSovereignAccountOf, TrustBackedAssetsPalletLocation, XcmConfig, + ForeignCreatorsSovereignAccountOf, LocationToAccountId, TrustBackedAssetsPalletLocation, + XcmConfig, }; pub use asset_hub_polkadot_runtime::{ AllPalletsWithoutSystem, AssetDeposit, Assets, Balances, ExistentialDeposit, ForeignAssets, ForeignAssetsInstance, MetadataDepositBase, MetadataDepositPerByte, ParachainSystem, Runtime, - RuntimeCall, RuntimeEvent, SessionKeys, System, TrustBackedAssetsInstance, + RuntimeCall, RuntimeEvent, SessionKeys, System, TrustBackedAssetsInstance, XcmpQueue, }; use asset_test_utils::{CollatorSessionKeys, ExtBuilder}; use codec::{Decode, Encode}; @@ -531,12 +532,6 @@ asset_test_utils::include_teleports_for_native_asset_works!( _ => None, } }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), 1000 ); @@ -657,3 +652,32 @@ asset_test_utils::include_create_and_manage_foreign_assets_for_local_consensus_p assert_eq!(ForeignAssets::asset_ids().collect::>().len(), 1); }) ); + +#[test] +fn reserve_transfer_native_asset_to_non_teleport_para_works() { + asset_test_utils::test_cases::reserve_transfer_native_asset_to_non_teleport_para_works::< + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + ParachainSystem, + XcmpQueue, + LocationToAccountId, + >( + collator_session_keys(), + ExistentialDeposit::get(), + AccountId::from(ALICE), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), + _ => None, + } + }), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), + _ => None, + } + }), + WeightLimit::Unlimited, + ); +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index f649ebedeff..06dcfb99a65 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -1077,7 +1077,7 @@ mod benches { [pallet_xcm_bridge_hub_router, ToWestend] [pallet_xcm_bridge_hub_router, ToRococo] // XCM - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -1315,6 +1315,7 @@ impl_runtime_apis! 
{ use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; use pallet_xcm_bridge_hub_router::benchmarking::Pallet as XcmBridgeHubRouterBench; // This is defined once again in dispatch_benchmark, because list_benchmarks! @@ -1368,6 +1369,39 @@ impl_runtime_apis! { Config as XcmBridgeHubRouterConfig, }; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between AH and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // AH can reserve transfer native token to some random parachain. + let random_para_id = 43211234; + ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( + random_para_id.into() + ); + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + ParentThen(Parachain(random_para_id).into()).into(), + )) + } + } + impl XcmBridgeHubRouterConfig for Runtime { fn make_congested() { cumulus_pallet_xcmp_queue::bridging::suspend_channel_for_benchmarks::( diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs index ff37ff74e75..b0bf9e82729 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs @@ -667,11 +667,6 @@ pub type XcmRouter = WithUniqueTopic<( ToRococoXcmRouter, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; // We want to disallow users sending (arbitrary) XCMs from this chain. 
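Alongside the `ReachableDest` removal, repeated here for Asset Hub Rococo, each runtime's benchmark list is repointed at the `pallet_xcm` extrinsic benchmarks through the new alias, as the `lib.rs` hunks show. Roughly, under the `runtime-benchmarks` feature (sketch only; the surrounding `mod benches` and the other entries are elided):

```rust
use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark;

frame_benchmarking::define_benchmarks!(
    // ... other benchmark entries unchanged ...
    [pallet_xcm, PalletXcmExtrinsiscsBenchmark::<Runtime>]
);
```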
@@ -701,8 +696,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs index c3d3c4abbbb..b4f4e828dde 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs @@ -529,12 +529,6 @@ asset_test_utils::include_teleports_for_native_asset_works!( _ => None, } }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), 1000 ); @@ -930,6 +924,35 @@ mod asset_hub_rococo_tests { actual ); } + + #[test] + fn reserve_transfer_native_asset_to_non_teleport_para_works() { + asset_test_utils::test_cases::reserve_transfer_native_asset_to_non_teleport_para_works::< + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + ParachainSystem, + XcmpQueue, + LocationToAccountId, + >( + collator_session_keys(), + ExistentialDeposit::get(), + AccountId::from(ALICE), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), + _ => None, + } + }), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), + _ => None, + } + }), + WeightLimit::Unlimited, + ); + } } mod asset_hub_wococo_tests { diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index f1a15265b90..d88aa2607e2 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -1013,7 +1013,7 @@ mod benches { [cumulus_pallet_dmp_queue, DmpQueue] [pallet_xcm_bridge_hub_router, ToRococo] // XCM - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -1297,6 +1297,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; use pallet_xcm_bridge_hub_router::benchmarking::Pallet as XcmBridgeHubRouterBench; // This is defined once again in dispatch_benchmark, because list_benchmarks! @@ -1343,6 +1344,39 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between AH and Relay. 
+ Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // AH can reserve transfer native token to some random parachain. + let random_para_id = 43211234; + ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( + random_para_id.into() + ); + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + ParentThen(Parachain(random_para_id).into()).into(), + )) + } + } + use pallet_xcm_bridge_hub_router::benchmarking::{ Pallet as XcmBridgeHubRouterBench, Config as XcmBridgeHubRouterConfig, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs index ec42618513a..17312c0f46e 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs @@ -636,11 +636,6 @@ pub type XcmRouter = WithUniqueTopic<( ToRococoXcmRouter, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = EnsureXcmOrigin; @@ -666,8 +661,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs index de87a98fb0b..7922b04e807 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs @@ -525,12 +525,6 @@ asset_test_utils::include_teleports_for_native_asset_works!( _ => None, } }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), 1000 ); @@ -815,3 +809,32 @@ fn change_xcm_bridge_hub_router_byte_fee_by_governance_works() { }, ) } + +#[test] +fn reserve_transfer_native_asset_to_non_teleport_para_works() { + asset_test_utils::test_cases::reserve_transfer_native_asset_to_non_teleport_para_works::< + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + ParachainSystem, + XcmpQueue, + LocationToAccountId, + >( + collator_session_keys(), + ExistentialDeposit::get(), + AccountId::from(ALICE), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), + _ => None, + } + }), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), + _ => None, + } + }), + WeightLimit::Unlimited, + ); +} diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/lib.rs b/cumulus/parachains/runtimes/assets/test-utils/src/lib.rs index 471b1f09b56..872ad06ddd5 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/lib.rs @@ -27,6 +27,21 @@ use 
std::fmt::Debug; use xcm::latest::prelude::*; use xcm_builder::{CreateMatcher, MatchXcm}; +/// Given a message, a sender, and a destination, it returns the delivery fees +fn get_fungible_delivery_fees(destination: MultiLocation, message: Xcm<()>) -> u128 { + let Ok((_, delivery_fees)) = validate_send::(destination, message) else { + unreachable!("message can be sent; qed") + }; + if let Some(delivery_fee) = delivery_fees.inner().first() { + let Fungible(delivery_fee_amount) = delivery_fee.fun else { + unreachable!("asset is fungible; qed"); + }; + delivery_fee_amount + } else { + 0 + } +} + /// Helper function to verify `xcm` contains all relevant instructions expected on destination /// chain as part of a reserve-asset-transfer. pub(crate) fn assert_matches_reserve_asset_deposited_instructions( diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs index 5fb34e7a571..f1cc76350a0 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs @@ -16,25 +16,28 @@ //! Module contains predefined test-case scenarios for `Runtime` with various assets. use super::xcm_helpers; +use crate::{assert_matches_reserve_asset_deposited_instructions, get_fungible_delivery_fees}; use codec::Encode; +use cumulus_primitives_core::XcmpMessageSource; use frame_support::{ assert_noop, assert_ok, traits::{ - fungible::Mutate, fungibles::InspectEnumerable, Get, OnFinalize, OnInitialize, OriginTrait, + fungible::Mutate, fungibles::InspectEnumerable, Currency, Get, OnFinalize, OnInitialize, + OriginTrait, }, weights::Weight, }; use frame_system::pallet_prelude::BlockNumberFor; use parachains_common::{AccountId, Balance}; use parachains_runtimes_test_utils::{ - assert_metadata, assert_total, AccountIdOf, BalanceOf, CollatorSessionKeys, ExtBuilder, - ValidatorIdOf, XcmReceivedFrom, + assert_metadata, assert_total, mock_open_hrmp_channel, AccountIdOf, BalanceOf, + CollatorSessionKeys, ExtBuilder, ValidatorIdOf, XcmReceivedFrom, }; use sp_runtime::{ traits::{MaybeEquivalence, StaticLookup, Zero}, DispatchError, Saturating, }; -use xcm::latest::prelude::*; +use xcm::{latest::prelude::*, VersionedMultiAssets}; use xcm_executor::{traits::ConvertLocation, XcmExecutor}; type RuntimeHelper = @@ -43,8 +46,8 @@ type RuntimeHelper = // Re-export test_case from `parachains-runtimes-test-utils` pub use parachains_runtimes_test_utils::test_cases::change_storage_constant_by_governance_works; -/// Test-case makes sure that `Runtime` can receive native asset from relay chain -/// and can teleport it back and to the other parachains +/// Test-case makes sure that `Runtime` can receive native asset from relay chain and can teleport +/// it back pub fn teleports_for_native_asset_works< Runtime, AllPalletsWithoutSystem, @@ -57,9 +60,6 @@ pub fn teleports_for_native_asset_works< existential_deposit: BalanceOf, target_account: AccountIdOf, unwrap_pallet_xcm_event: Box) -> Option>>, - unwrap_xcmp_queue_event: Box< - dyn Fn(Vec) -> Option>, - >, runtime_para_id: u32, ) where Runtime: frame_system::Config @@ -164,12 +164,13 @@ pub fn teleports_for_native_asset_works< // 2. 
try to teleport asset back to the relaychain { let dest = MultiLocation::parent(); - let dest_beneficiary = MultiLocation::parent() + let mut dest_beneficiary = MultiLocation::parent() .appended_with(AccountId32 { network: None, id: sp_runtime::AccountId32::new([3; 32]).into(), }) .unwrap(); + dest_beneficiary.reanchor(&dest, XcmConfig::UniversalLocation::get()).unwrap(); let target_account_balance_before_teleport = >::free_balance(&target_account); @@ -223,65 +224,53 @@ pub fn teleports_for_native_asset_works< ); } - // 3. try to teleport asset away to other parachain (1234) + // 3. try to teleport assets away to other parachain (2345): should not work as we don't + // trust `IsTeleporter` for `(relay-native-asset, para(2345))` pair { - let other_para_id = 1234; + let other_para_id = 2345; let dest = MultiLocation::new(1, X1(Parachain(other_para_id))); - let dest_beneficiary = MultiLocation::new(1, X1(Parachain(other_para_id))) + let mut dest_beneficiary = MultiLocation::new(1, X1(Parachain(other_para_id))) .appended_with(AccountId32 { network: None, id: sp_runtime::AccountId32::new([3; 32]).into(), }) .unwrap(); + dest_beneficiary.reanchor(&dest, XcmConfig::UniversalLocation::get()).unwrap(); let target_account_balance_before_teleport = >::free_balance(&target_account); + let native_asset_to_teleport_away = native_asset_amount_unit * 3.into(); assert!( native_asset_to_teleport_away < target_account_balance_before_teleport - existential_deposit ); - - assert_ok!(RuntimeHelper::::do_teleport_assets::( - RuntimeHelper::::origin_of(target_account.clone()), - dest, - dest_beneficiary, - (native_asset_id, native_asset_to_teleport_away.into()), - Some((runtime_para_id, other_para_id)), - included_head, - &alice, - )); - - let delivery_fees = - xcm_helpers::transfer_assets_delivery_fees::( - (native_asset_id, native_asset_to_teleport_away.into()).into(), - 0, - Unlimited, - dest_beneficiary, + assert_eq!( + RuntimeHelper::::do_teleport_assets::( + RuntimeHelper::::origin_of(target_account.clone()), dest, - ); + dest_beneficiary, + (native_asset_id, native_asset_to_teleport_away.into()), + Some((runtime_para_id, other_para_id)), + included_head, + &alice, + ), + Err(DispatchError::Module(sp_runtime::ModuleError { + index: 31, + error: [2, 0, 0, 0,], + message: Some("Filtered",), + },),) + ); // check balances assert_eq!( >::free_balance(&target_account), - target_account_balance_before_teleport - - native_asset_to_teleport_away - - delivery_fees.into() + target_account_balance_before_teleport ); assert_eq!( >::free_balance(&CheckingAccount::get()), 0.into() ); - - // check events - RuntimeHelper::::assert_pallet_xcm_event_outcome( - &unwrap_pallet_xcm_event, - |outcome| { - assert_ok!(outcome.ensure_complete()); - }, - ); - assert!(RuntimeHelper::::xcmp_queue_message_sent(unwrap_xcmp_queue_event) - .is_some()); } }) } @@ -298,7 +287,6 @@ macro_rules! include_teleports_for_native_asset_works( $collator_session_key:expr, $existential_deposit:expr, $unwrap_pallet_xcm_event:expr, - $unwrap_xcmp_queue_event:expr, $runtime_para_id:expr ) => { #[test] @@ -318,15 +306,14 @@ macro_rules! 
include_teleports_for_native_asset_works( $existential_deposit, target_account, $unwrap_pallet_xcm_event, - $unwrap_xcmp_queue_event, $runtime_para_id ) } } ); -/// Test-case makes sure that `Runtime` can receive teleported assets from sibling parachain relay -/// chain +/// Test-case makes sure that `Runtime` can receive teleported assets from sibling parachain, and +/// can teleport it back pub fn teleports_for_foreign_assets_works< Runtime, AllPalletsWithoutSystem, @@ -381,7 +368,7 @@ pub fn teleports_for_foreign_assets_works< ::AccountId: From, ForeignAssetsPalletInstance: 'static, { - // foreign parachain with the same consenus currency as asset + // foreign parachain with the same consensus currency as asset let foreign_para_id = 2222; let foreign_asset_id_multilocation = MultiLocation { parents: 1, @@ -473,7 +460,7 @@ pub fn teleports_for_foreign_assets_works< >(foreign_asset_id_multilocation, 0, 0); assert!(teleported_foreign_asset_amount > asset_minimum_asset_balance); - // 1. process received teleported assets from relaychain + // 1. process received teleported assets from sibling parachain (foreign_para_id) let xcm = Xcm(vec![ // BuyExecution with relaychain native token WithdrawAsset(buy_execution_fee.clone().into()), @@ -551,12 +538,13 @@ pub fn teleports_for_foreign_assets_works< // 2. try to teleport asset back to source parachain (foreign_para_id) { let dest = MultiLocation::new(1, X1(Parachain(foreign_para_id))); - let dest_beneficiary = MultiLocation::new(1, X1(Parachain(foreign_para_id))) + let mut dest_beneficiary = MultiLocation::new(1, X1(Parachain(foreign_para_id))) .appended_with(AccountId32 { network: None, id: sp_runtime::AccountId32::new([3; 32]).into(), }) .unwrap(); + dest_beneficiary.reanchor(&dest, XcmConfig::UniversalLocation::get()).unwrap(); let target_account_balance_before_teleport = >::balance( @@ -1108,7 +1096,7 @@ pub fn create_and_manage_foreign_assets_for_local_consensus_parachain_assets_wor AssetId: Clone + Copy, AssetIdConverter: MaybeEquivalence, { - // foreign parachain with the same consenus currency as asset + // foreign parachain with the same consensus currency as asset let foreign_asset_id_multilocation = MultiLocation { parents: 1, interior: X2(Parachain(2222), GeneralIndex(1234567)) }; let asset_id = AssetIdConverter::convert(&foreign_asset_id_multilocation).unwrap(); @@ -1388,3 +1376,199 @@ macro_rules! 
include_create_and_manage_foreign_assets_for_local_consensus_parach } } ); + +/// Test-case makes sure that `Runtime` can reserve-transfer asset to other parachains (where +/// teleport is not trusted) +pub fn reserve_transfer_native_asset_to_non_teleport_para_works< + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + HrmpChannelOpener, + HrmpChannelSource, + LocationToAccountId, +>( + collator_session_keys: CollatorSessionKeys, + existential_deposit: BalanceOf, + alice_account: AccountIdOf, + unwrap_pallet_xcm_event: Box) -> Option>>, + unwrap_xcmp_queue_event: Box< + dyn Fn(Vec) -> Option>, + >, + weight_limit: WeightLimit, +) where + Runtime: frame_system::Config + + pallet_balances::Config + + pallet_session::Config + + pallet_xcm::Config + + parachain_info::Config + + pallet_collator_selection::Config + + cumulus_pallet_parachain_system::Config + + cumulus_pallet_xcmp_queue::Config, + AllPalletsWithoutSystem: + OnInitialize> + OnFinalize>, + AccountIdOf: Into<[u8; 32]>, + ValidatorIdOf: From>, + BalanceOf: From, + ::Balance: From + Into, + XcmConfig: xcm_executor::Config, + LocationToAccountId: ConvertLocation>, + ::AccountId: + Into<<::RuntimeOrigin as OriginTrait>::AccountId>, + <::Lookup as StaticLookup>::Source: + From<::AccountId>, + ::AccountId: From, + HrmpChannelOpener: frame_support::inherent::ProvideInherent< + Call = cumulus_pallet_parachain_system::Call, + >, + HrmpChannelSource: XcmpMessageSource, +{ + let runtime_para_id = 1000; + ExtBuilder::::default() + .with_collators(collator_session_keys.collators()) + .with_session_keys(collator_session_keys.session_keys()) + .with_tracing() + .with_safe_xcm_version(3) + .with_para_id(runtime_para_id.into()) + .build() + .execute_with(|| { + let mut alice = [0u8; 32]; + alice[0] = 1; + let included_head = RuntimeHelper::::run_to_block( + 2, + AccountId::from(alice).into(), + ); + + // reserve-transfer native asset with local reserve to remote parachain (2345) + + let other_para_id = 2345; + let native_asset = MultiLocation::parent(); + let dest = MultiLocation::new(1, X1(Parachain(other_para_id))); + let mut dest_beneficiary = MultiLocation::new(1, X1(Parachain(other_para_id))) + .appended_with(AccountId32 { + network: None, + id: sp_runtime::AccountId32::new([3; 32]).into(), + }) + .unwrap(); + dest_beneficiary.reanchor(&dest, XcmConfig::UniversalLocation::get()).unwrap(); + + let reserve_account = LocationToAccountId::convert_location(&dest) + .expect("Sovereign account for reserves"); + let balance_to_transfer = 1_000_000_000_000_u128; + + // open HRMP to other parachain + mock_open_hrmp_channel::( + runtime_para_id.into(), + other_para_id.into(), + included_head, + &alice, + ); + + // we calculate exact delivery fees _after_ sending the message by weighing the sent + // xcm, and this delivery fee varies for different runtimes, so just add enough buffer, + // then verify the arithmetics check out on final balance. 
+ let delivery_fees_buffer = 40_000_000_000u128; + // drip 2xED + transfer_amount + delivery_fees_buffer to Alice account + let alice_account_init_balance = existential_deposit.saturating_mul(2.into()) + + balance_to_transfer.into() + + delivery_fees_buffer.into(); + let _ = >::deposit_creating( + &alice_account, + alice_account_init_balance, + ); + // SA of target location needs to have at least ED, otherwise making reserve fails + let _ = >::deposit_creating( + &reserve_account, + existential_deposit, + ); + + // we just check here, that user retains enough balance after withdrawal + // and also we check if `balance_to_transfer` is more than `existential_deposit`, + assert!( + (>::free_balance(&alice_account) - + balance_to_transfer.into()) >= + existential_deposit + ); + // SA has just ED + assert_eq!( + >::free_balance(&reserve_account), + existential_deposit + ); + + // local native asset (pallet_balances) + let asset_to_transfer = MultiAsset { + fun: Fungible(balance_to_transfer.into()), + id: Concrete(native_asset), + }; + + // pallet_xcm call reserve transfer + assert_ok!(>::limited_reserve_transfer_assets( + RuntimeHelper::::origin_of(alice_account.clone()), + Box::new(dest.into_versioned()), + Box::new(dest_beneficiary.into_versioned()), + Box::new(VersionedMultiAssets::from(MultiAssets::from(asset_to_transfer))), + 0, + weight_limit, + )); + + // check events + // check pallet_xcm attempted + RuntimeHelper::::assert_pallet_xcm_event_outcome( + &unwrap_pallet_xcm_event, + |outcome| { + assert_ok!(outcome.ensure_complete()); + }, + ); + + // check that xcm was sent + let xcm_sent_message_hash = >::events() + .into_iter() + .filter_map(|e| unwrap_xcmp_queue_event(e.event.encode())) + .find_map(|e| match e { + cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { message_hash } => + Some(message_hash), + _ => None, + }); + + // read xcm + let xcm_sent = RuntimeHelper::::take_xcm( + other_para_id.into(), + ) + .unwrap(); + + let delivery_fees = get_fungible_delivery_fees::< + ::XcmSender, + >(dest, Xcm::try_from(xcm_sent.clone()).unwrap()); + + assert_eq!( + xcm_sent_message_hash, + Some(xcm_sent.using_encoded(sp_io::hashing::blake2_256)) + ); + let mut xcm_sent: Xcm<()> = xcm_sent.try_into().expect("versioned xcm"); + + // check sent XCM Program to other parachain + println!("reserve_transfer_native_asset_works sent xcm: {:?}", xcm_sent); + let reserve_assets_deposited = MultiAssets::from(vec![MultiAsset { + id: Concrete(MultiLocation { parents: 1, interior: Here }), + fun: Fungible(1000000000000), + }]); + + assert_matches_reserve_asset_deposited_instructions( + &mut xcm_sent, + &reserve_assets_deposited, + &dest_beneficiary, + ); + + // check alice account decreased by balance_to_transfer ( + delivery_fees) + assert_eq!( + >::free_balance(&alice_account), + alice_account_init_balance - balance_to_transfer.into() - delivery_fees.into() + ); + + // check reserve account + // check reserve account increased by balance_to_transfer + assert_eq!( + >::free_balance(&reserve_account), + existential_deposit + balance_to_transfer.into() + ); + }) +} diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs index 6c8ac8c6452..851fcd5c7d6 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs @@ -16,7 +16,7 @@ //! 
Module contains predefined test-case scenarios for `Runtime` with various assets transferred //! over a bridge. -use crate::assert_matches_reserve_asset_deposited_instructions; +use crate::{assert_matches_reserve_asset_deposited_instructions, get_fungible_delivery_fees}; use codec::Encode; use cumulus_primitives_core::XcmpMessageSource; use frame_support::{ @@ -32,10 +32,7 @@ use parachains_runtimes_test_utils::{ use sp_runtime::{traits::StaticLookup, Saturating}; use xcm::{latest::prelude::*, VersionedMultiAssets}; use xcm_builder::{CreateMatcher, MatchXcm}; -use xcm_executor::{ - traits::{ConvertLocation, TransactAsset}, - XcmExecutor, -}; +use xcm_executor::{traits::ConvertLocation, XcmExecutor}; pub struct TestBridgingConfig { pub bridged_network: NetworkId, @@ -129,8 +126,13 @@ pub fn limited_reserve_transfer_assets_for_native_asset_works< &alice, ); - // drip ED to account - let alice_account_init_balance = existential_deposit + balance_to_transfer.into(); + // we calculate exact delivery fees _after_ sending the message by weighing the sent + // xcm, and this delivery fee varies for different runtimes, so just add enough buffer, + // then verify the arithmetics check out on final balance. + let delivery_fees_buffer = 8_000_000_000_000u128; + // drip ED + transfer_amount + delivery_fees_buffer to Alice account + let alice_account_init_balance = + existential_deposit + balance_to_transfer.into() + delivery_fees_buffer.into(); let _ = >::deposit_creating( &alice_account, alice_account_init_balance, @@ -183,56 +185,6 @@ pub fn limited_reserve_transfer_assets_for_native_asset_works< let expected_beneficiary = target_destination_account; - // Make sure sender has enough funds for paying delivery fees - let handling_delivery_fees = { - // Probable XCM with `ReserveAssetDeposited`. - let mut expected_reserve_asset_deposited_message = Xcm(vec![ - ReserveAssetDeposited(MultiAssets::from(expected_assets.clone())), - ClearOrigin, - BuyExecution { - fees: MultiAsset { - id: Concrete(Default::default()), - fun: Fungible(balance_to_transfer), - }, - weight_limit: Unlimited, - }, - DepositAsset { assets: Wild(AllCounted(1)), beneficiary: expected_beneficiary }, - SetTopic([ - 220, 188, 144, 32, 213, 83, 111, 175, 44, 210, 111, 19, 90, 165, 191, 112, - 140, 247, 192, 124, 42, 17, 153, 141, 114, 34, 189, 20, 83, 69, 237, 173, - ]), - ]); - assert_matches_reserve_asset_deposited_instructions( - &mut expected_reserve_asset_deposited_message, - &expected_assets, - &expected_beneficiary, - ); - - // Call `SendXcm::validate` to get delivery fees. - let (_, delivery_fees): (_, MultiAssets) = XcmConfig::XcmSender::validate( - &mut Some(target_location_from_different_consensus), - &mut Some(expected_reserve_asset_deposited_message), - ) - .expect("validate passes"); - // Drip delivery fee to Alice account. - let mut delivery_fees_added = false; - for delivery_fee in delivery_fees.inner() { - assert_ok!(::deposit_asset( - &delivery_fee, - &MultiLocation { - parents: 0, - interior: X1(AccountId32 { - network: None, - id: alice_account.clone().into(), - }), - }, - None, - )); - delivery_fees_added = true; - } - delivery_fees_added - }; - // do pallet_xcm call reserve transfer assert_ok!(>::limited_reserve_transfer_assets( RuntimeHelper::::origin_of(alice_account.clone()), @@ -275,6 +227,7 @@ pub fn limited_reserve_transfer_assets_for_native_asset_works< // check sent XCM ExportMessage to BridgeHub + let mut delivery_fees = 0; // 1. 
check paid or unpaid if let Some(expected_fee_asset_id) = maybe_paid_export_message { xcm_sent @@ -315,6 +268,10 @@ pub fn limited_reserve_transfer_assets_for_native_asset_works< .split_global() .expect("split works"); assert_eq!(destination, &target_location_junctions_without_global_consensus); + // Call `SendXcm::validate` to get delivery fees. + delivery_fees = get_fungible_delivery_fees::< + ::XcmSender, + >(target_location_from_different_consensus, inner_xcm.clone()); assert_matches_reserve_asset_deposited_instructions( inner_xcm, &expected_assets, @@ -330,8 +287,8 @@ pub fn limited_reserve_transfer_assets_for_native_asset_works< assert_eq!( >::free_balance(&alice_account), alice_account_init_balance - .saturating_sub(existential_deposit) .saturating_sub(balance_to_transfer.into()) + .saturating_sub(delivery_fees.into()) ); // check reserve account increased by balance_to_transfer @@ -341,14 +298,13 @@ pub fn limited_reserve_transfer_assets_for_native_asset_works< ); // check dedicated account increased by delivery fees (if configured) - if handling_delivery_fees { - if let Some(delivery_fees_account) = delivery_fees_account { - let delivery_fees_account_balance_after = - >::free_balance(&delivery_fees_account); - assert!( - delivery_fees_account_balance_after > delivery_fees_account_balance_before - ); - } + if let Some(delivery_fees_account) = delivery_fees_account { + let delivery_fees_account_balance_after = + >::free_balance(&delivery_fees_account); + assert!( + delivery_fees_account_balance_after - delivery_fees.into() >= + delivery_fees_account_balance_before + ); } }) } diff --git a/cumulus/parachains/runtimes/bridge-hubs/README.md b/cumulus/parachains/runtimes/bridge-hubs/README.md index 9bd6557f350..b2a14a0405d 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/README.md +++ b/cumulus/parachains/runtimes/bridge-hubs/README.md @@ -270,7 +270,7 @@ cd ### Send messages - transfer asset over bridge (ROCs/WNDs) -Do (asset) transfers: +Do reserve-backed transfers: ``` cd @@ -291,6 +291,20 @@ cd - AssetHubWestend (see `foreignAssets.Issued`, `xcmpQueue.Success`) https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:9010#/explorer - BridgeHubRocococ (see `bridgeWestendMessages.MessagesDelivered`) https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:8943#/explorer +Do reserve withdraw transfers: (when previous is finished) +``` +cd + +# wrappedWNDs from Rococo's Asset Hub to Westend's. +./cumulus/scripts/bridges_rococo_westend.sh withdraw-reserve-assets-from-asset-hub-rococo-local +``` +``` +cd + +# wrappedROCs from Westend's Asset Hub to Rococo's. +./cumulus/scripts/bridges_rococo_westend.sh withdraw-reserve-assets-from-asset-hub-westend-local +``` + ### Claim relayer's rewards on BridgeHubRococo and BridgeHubWestend **Accounts of BridgeHub parachains:** diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs index bd95ec3fda7..b3750700084 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs @@ -490,7 +490,7 @@ mod benches { [cumulus_pallet_xcmp_queue, XcmpQueue] [cumulus_pallet_dmp_queue, DmpQueue] // XCM - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -670,6 +670,7 @@ impl_runtime_apis! 
{ use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -705,6 +706,29 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between BH and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Reserve transfers are disabled on BH. + None + } + } + use xcm::latest::prelude::*; use xcm_config::KsmRelayLocation; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/xcm_config.rs index 727735c9285..b3703eee830 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/xcm_config.rs @@ -241,11 +241,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; // We want to disallow users sending (arbitrary) XCMs from this chain. 
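The bridge-hub runtimes return `None` from `reserve_transferable_asset_and_dest` because reserve transfers are disabled there. A purely illustrative sketch, not pallet-xcm's actual benchmarking code, of how a benchmark can gate on such an optional helper so those runtimes simply skip it:

```rust
use xcm::latest::{MultiAsset, MultiLocation};

// Illustrative only: gate a benchmark on an Option-returning helper, so a
// runtime that does not support the operation (e.g. reserve transfers on a
// bridge hub) reports it as skipped instead of measuring anything.
fn maybe_bench_reserve_transfer(
    helper: impl Fn() -> Option<(MultiAsset, MultiLocation)>,
) -> Result<(), &'static str> {
    let Some((asset, dest)) = helper() else {
        // Nothing to measure on this runtime.
        return Err("reserve transfers not supported on this runtime");
    };
    // ...set up and execute the transfer of `asset` to `dest`, measuring weight...
    let _ = (asset, dest);
    Ok(())
}
```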
@@ -274,8 +269,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/tests/tests.rs index 893524e12f6..36d8f0846af 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/tests/tests.rs @@ -47,11 +47,5 @@ bridge_hub_test_utils::test_cases::include_teleports_for_native_asset_works!( _ => None, } }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), 1002 ); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs index 4744dc08e8e..841bb4ee861 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs @@ -491,7 +491,7 @@ mod benches { [cumulus_pallet_xcmp_queue, XcmpQueue] [cumulus_pallet_dmp_queue, DmpQueue] // XCM - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -671,6 +671,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -706,6 +707,29 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between BH and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Reserve transfers are disabled on BH. + None + } + } + use xcm::latest::prelude::*; use xcm_config::DotRelayLocation; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/xcm_config.rs index ac7e00fc427..61eee1c4c5a 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/xcm_config.rs @@ -245,11 +245,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! 
{ - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; // We want to disallow users sending (arbitrary) XCMs from this chain. @@ -278,8 +273,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/tests/tests.rs index 0be87bd46fa..3156a5fe68e 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/tests/tests.rs @@ -47,11 +47,5 @@ bridge_hub_test_utils::test_cases::include_teleports_for_native_asset_works!( _ => None, } }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), 1002 ); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index e5d38bcac23..b17d308b891 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -595,7 +595,7 @@ mod benches { [cumulus_pallet_xcmp_queue, XcmpQueue] [cumulus_pallet_dmp_queue, DmpQueue] // XCM - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -933,6 +933,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -980,6 +981,29 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between BH and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Reserve transfers are disabled on BH. 
+ None + } + } + use xcm::latest::prelude::*; use xcm_config::TokenLocation; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs index d98012e061b..1b1e6f8ba71 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs @@ -349,11 +349,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type XcmRouter = XcmRouter; @@ -381,8 +376,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs index 65cca67dac1..39ee2576f5b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs @@ -133,12 +133,6 @@ mod bridge_hub_rococo_tests { _ => None, } }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID ); @@ -517,12 +511,6 @@ mod bridge_hub_wococo_tests { _ => None, } }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID ); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 458876ce46c..9e8fd84e712 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -544,7 +544,7 @@ mod benches { [pallet_collator_selection, CollatorSelection] [cumulus_pallet_xcmp_queue, XcmpQueue] // XCM - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -772,6 +772,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -813,6 +814,29 @@ impl_runtime_apis! 
{ use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between BH and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Reserve transfers are disabled on BH. + None + } + } + use xcm::latest::prelude::*; use xcm_config::WestendLocation; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs index a6abca42215..7084882c41f 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs @@ -284,11 +284,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type XcmRouter = XcmRouter; @@ -316,8 +311,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs index 16dcd10a2ca..4d477e1413e 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs @@ -118,12 +118,6 @@ bridge_hub_test_utils::test_cases::include_teleports_for_native_asset_works!( _ => None, } }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), bp_bridge_hub_westend::BRIDGE_HUB_WESTEND_PARACHAIN_ID ); diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs index b7bfc9f9c6a..206f4614060 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs @@ -751,7 +751,7 @@ mod benches { [cumulus_pallet_dmp_queue, DmpQueue] [pallet_alliance, Alliance] [pallet_collective, AllianceMotion] - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] [pallet_preimage, Preimage] [pallet_scheduler, Scheduler] [pallet_referenda, FellowshipReferenda] @@ -939,6 +939,7 @@ impl_runtime_apis! 
{ use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; let mut list = Vec::::new(); list_benchmarks!(list, extra); @@ -968,6 +969,29 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between Collectives and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }.into(), + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Reserve transfers are disabled on Collectives. + None + } + } + let whitelist: Vec = vec![ // Block Number hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/xcm_config.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/xcm_config.rs index c0b3108d2fb..71845650bd6 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/xcm_config.rs @@ -293,11 +293,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - /// Type to convert the Fellows origin to a Plurality `MultiLocation` value. pub type FellowsToPlurality = OriginToPluralityVoice; @@ -325,8 +320,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index e41db7d9213..2a2f4141033 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -433,7 +433,7 @@ mod benches { [pallet_timestamp, Timestamp] [pallet_collator_selection, CollatorSelection] [pallet_contracts, Contracts] - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] ); } @@ -678,6 +678,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; let mut list = Vec::::new(); list_benchmarks!(list, extra); @@ -707,6 +708,30 @@ impl_runtime_apis! 
{ use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use xcm::latest::prelude::*; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between Contracts-System-Para and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Reserve transfers are disabled on Contracts-System-Para. + None + } + } + let whitelist: Vec = vec![ // Block Number hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs index 4c9f357e111..faee1c68fe6 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs @@ -227,11 +227,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; // We want to disallow users sending (arbitrary) XCMs from this chain. @@ -258,8 +253,6 @@ impl pallet_xcm::Config for Runtime { type MaxLockers = ConstU32<8>; // FIXME: Replace with benchmarked weight info type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 5ef9af7c712..1ddad31920a 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -434,7 +434,7 @@ parameter_types! { // pub type AssetsForceOrigin = // EnsureOneOf, EnsureXcm>>; -impl pallet_assets::Config for Runtime { +impl pallet_assets::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Balance = Balance; type AssetId = AssetId; @@ -577,7 +577,12 @@ impl pallet_asset_tx_payment::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Fungibles = Assets; type OnChargeAssetTransaction = pallet_asset_tx_payment::FungiblesAdapter< - pallet_assets::BalanceToAssetBalance, + pallet_assets::BalanceToAssetBalance< + Balances, + Runtime, + ConvertInto, + pallet_assets::Instance1, + >, AssetsToBlockAuthor, >; } @@ -619,7 +624,7 @@ construct_runtime!( MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 34, // The main stage. 
- Assets: pallet_assets::{Pallet, Call, Storage, Event} = 50, + Assets: pallet_assets::::{Pallet, Call, Storage, Event} = 50, Sudo: pallet_sudo::{Pallet, Call, Storage, Event, Config} = 255, } diff --git a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs index 710dfd79877..74d9a0b071d 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs @@ -38,6 +38,7 @@ use frame_support::{ }; use frame_system::EnsureRoot; use pallet_asset_tx_payment::HandleCredit; +use pallet_assets::Instance1; use pallet_xcm::XcmPassthrough; use polkadot_parachain_primitives::primitives::Sibling; use polkadot_runtime_common::impls::ToAuthor; @@ -48,9 +49,10 @@ use xcm_builder::{ AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, AsPrefixedGeneralIndex, ConvertedConcreteId, CurrencyAdapter, DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, FixedWeightBounds, FungiblesAdapter, IsConcrete, LocalMint, NativeAsset, - ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, - SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, - TrailingSetTopicAsId, UsingComponents, WithComputedOrigin, WithUniqueTopic, + ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, + SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, + SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, + WithComputedOrigin, WithUniqueTopic, }; use xcm_executor::{traits::JustTry, XcmExecutor}; @@ -126,6 +128,9 @@ pub type XcmOriginToTransactDispatchOrigin = ( // Native converter for sibling Parachains; will convert to a `SiblingPara` origin when // recognized. SiblingParachainAsNative, + // Superuser converter for the Relay-chain (Parent) location. This will allow it to issue a + // transaction from the Root origin. + ParentAsSuperuser, // Native signed account converter; this just converts an `AccountId32` origin into a normal // `RuntimeOrigin::Signed` origin of the same 32-byte value. SignedAccountId32AsNative, @@ -182,14 +187,25 @@ pub type Barrier = TrailingSetTopicAsId< /// Type alias to conveniently refer to `frame_system`'s `Config::AccountId`. pub type AccountIdOf = ::AccountId; -/// Asset filter that allows all assets from a certain location. +/// Asset filter that allows all assets from a certain location matching asset id. pub struct AssetsFrom(PhantomData); impl> ContainsPair for AssetsFrom { fn contains(asset: &MultiAsset, origin: &MultiLocation) -> bool { let loc = T::get(); &loc == origin && matches!(asset, MultiAsset { id: AssetId::Concrete(asset_loc), fun: Fungible(_a) } - if asset_loc.match_and_split(&loc).is_some()) + if asset_loc.starts_with(&loc)) + } +} + +/// Asset filter that allows native/relay asset if coming from a certain location. +pub struct NativeAssetFrom(PhantomData); +impl> ContainsPair for NativeAssetFrom { + fn contains(asset: &MultiAsset, origin: &MultiLocation) -> bool { + let loc = T::get(); + &loc == origin && + matches!(asset, MultiAsset { id: AssetId::Concrete(asset_loc), fun: Fungible(_a) } + if *asset_loc == MultiLocation::from(Parent)) } } @@ -208,56 +224,19 @@ where /// A `HandleCredit` implementation that naively transfers the fees to the block author. /// Will drop and burn the assets in case the transfer fails. 
pub struct AssetsToBlockAuthor(PhantomData); -impl HandleCredit, pallet_assets::Pallet> for AssetsToBlockAuthor +impl HandleCredit, pallet_assets::Pallet> for AssetsToBlockAuthor where - R: pallet_authorship::Config + pallet_assets::Config, + R: pallet_authorship::Config + pallet_assets::Config, AccountIdOf: From + Into, { - fn handle_credit(credit: Credit, pallet_assets::Pallet>) { + fn handle_credit(credit: Credit, pallet_assets::Pallet>) { if let Some(author) = pallet_authorship::Pallet::::author() { // In case of error: Will drop the result triggering the `OnDrop` of the imbalance. - let _ = pallet_assets::Pallet::::resolve(&author, credit); + let _ = pallet_assets::Pallet::::resolve(&author, credit); } } } -pub trait Reserve { - /// Returns assets reserve location. - fn reserve(&self) -> Option; -} - -// Takes the chain part of a MultiAsset -impl Reserve for MultiAsset { - fn reserve(&self) -> Option { - if let AssetId::Concrete(location) = self.id { - let first_interior = location.first_interior(); - let parents = location.parent_count(); - match (parents, first_interior) { - (0, Some(Parachain(id))) => Some(MultiLocation::new(0, X1(Parachain(*id)))), - (1, Some(Parachain(id))) => Some(MultiLocation::new(1, X1(Parachain(*id)))), - (1, _) => Some(MultiLocation::parent()), - _ => None, - } - } else { - None - } - } -} - -/// A `FilterAssetLocation` implementation. Filters multi native assets whose -/// reserve is same with `origin`. -pub struct MultiNativeAsset; -impl ContainsPair for MultiNativeAsset { - fn contains(asset: &MultiAsset, origin: &MultiLocation) -> bool { - if let Some(ref reserve) = asset.reserve() { - if reserve == origin { - return true - } - } - false - } -} - parameter_types! { /// The location that this chain recognizes as the Relay network's Asset Hub. pub SystemAssetHubLocation: MultiLocation = MultiLocation::new(1, X1(Parachain(1000))); @@ -268,7 +247,8 @@ parameter_types! { pub CheckingAccount: AccountId = PolkadotXcm::check_account(); } -pub type Reserves = (NativeAsset, AssetsFrom); +pub type Reserves = + (NativeAsset, AssetsFrom, NativeAssetFrom); pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { @@ -277,7 +257,8 @@ impl xcm_executor::Config for XcmConfig { // How to withdraw and deposit an asset. type AssetTransactor = AssetTransactors; type OriginConverter = XcmOriginToTransactDispatchOrigin; - type IsReserve = MultiNativeAsset; // TODO: maybe needed to be replaced by Reserves + type IsReserve = Reserves; + // no teleport trust established with other chains type IsTeleporter = NativeAsset; type UniversalLocation = UniversalLocation; type Barrier = Barrier; @@ -312,11 +293,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! 
{ - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = EnsureXcmOrigin; @@ -342,8 +318,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index 4cb83ccf820..6df00d43e8d 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -492,11 +492,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = EnsureXcmOrigin; @@ -518,8 +513,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/scripts/bridges_rococo_westend.sh b/cumulus/scripts/bridges_rococo_westend.sh index ce8480685aa..82b5f1942b2 100755 --- a/cumulus/scripts/bridges_rococo_westend.sh +++ b/cumulus/scripts/bridges_rococo_westend.sh @@ -301,9 +301,21 @@ case "$1" in 0 \ "Unlimited" ;; + withdraw-reserve-assets-from-asset-hub-rococo-local) + ensure_polkadot_js_api + # send back only 100000000000 wrappedWNDs to Alice account on AHW + limited_reserve_transfer_assets \ + "ws://127.0.0.1:9910" \ + "//Alice" \ + "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Westend" }, { "Parachain": 1000 } ] } } }')" \ + "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ + "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Westend" } } } }, "fun": { "Fungible": 140000000000 } } ] }')" \ + 0 \ + "Unlimited" + ;; reserve-transfer-assets-from-asset-hub-westend-local) ensure_polkadot_js_api - # send WOCs to Alice account on AHR + # send WNDs to Alice account on AHR limited_reserve_transfer_assets \ "ws://127.0.0.1:9010" \ "//Alice" \ @@ -313,6 +325,18 @@ case "$1" in 0 \ "Unlimited" ;; + withdraw-reserve-assets-from-asset-hub-westend-local) + ensure_polkadot_js_api + # send back only 100000000000 wrappedROCs to Alice account on AHR + limited_reserve_transfer_assets \ + "ws://127.0.0.1:9010" \ + "//Alice" \ + "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Rococo" }, { "Parachain": 1000 } ] } } }')" \ + "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ + "$(jq 
--null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Rococo" } } } }, "fun": { "Fungible": 100000000000 } } ] }')" \ + 0 \ + "Unlimited" + ;; claim-rewards-bridge-hub-rococo-local) ensure_polkadot_js_api # bhwd -> [62, 68, 77, 64] -> 0x62687764 @@ -360,7 +384,9 @@ case "$1" in - init-asset-hub-westend-local - init-bridge-hub-westend-local - reserve-transfer-assets-from-asset-hub-rococo-local + - withdraw-reserve-assets-from-asset-hub-rococo-local - reserve-transfer-assets-from-asset-hub-westend-local + - withdraw-reserve-assets-from-asset-hub-westend-local - claim-rewards-bridge-hub-rococo-local - claim-rewards-bridge-hub-westend-local"; exit 1 diff --git a/cumulus/xcm/xcm-emulator/src/lib.rs b/cumulus/xcm/xcm-emulator/src/lib.rs index 7ff5512d214..f2e4ff397c4 100644 --- a/cumulus/xcm/xcm-emulator/src/lib.rs +++ b/cumulus/xcm/xcm-emulator/src/lib.rs @@ -1443,9 +1443,9 @@ pub struct TestContext { /// These arguments can be easily reused and shared between the assertion functions /// and dispatchable functions, which are also stored in `Test`. /// `Origin` corresponds to the chain where the XCM interaction starts with an initial execution. -/// `Destination` corresponds to the last chain where an effect of the intial execution is expected -/// happen. `Hops` refer all the ordered intermediary chains an initial XCM execution can provoke -/// some effect. +/// `Destination` corresponds to the last chain where an effect of the initial execution is expected +/// to happen. `Hops` refer to all the ordered intermediary chains an initial XCM execution can +/// provoke some effect on. #[derive(Clone)] pub struct Test where @@ -1499,7 +1499,7 @@ where let chain_name = std::any::type_name::(); self.hops_assertion.insert(chain_name.to_string(), assertion); } - /// Stores an assertion in a particular Chain + /// Stores a dispatchable in a particular Chain pub fn set_dispatchable(&mut self, dispatchable: fn(Self) -> DispatchResult) { let chain_name = std::any::type_name::(); self.hops_dispatchable.insert(chain_name.to_string(), dispatchable); diff --git a/polkadot/runtime/parachains/src/paras/mod.rs b/polkadot/runtime/parachains/src/paras/mod.rs index cd73d23bdad..ef9dfedd735 100644 --- a/polkadot/runtime/parachains/src/paras/mod.rs +++ b/polkadot/runtime/parachains/src/paras/mod.rs @@ -2064,7 +2064,7 @@ impl Pallet { } /// Submits a given PVF check statement with corresponding signature as an unsigned transaction - /// into the memory pool. Ultimately, that disseminates the transaction accross the network. + /// into the memory pool. Ultimately, that disseminates the transaction across the network. /// /// This function expects an offchain context and cannot be callable from the on-chain logic. /// diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 57767b70d23..40ef22107a7 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -1588,7 +1588,7 @@ mod benches { [pallet_asset_rate, AssetRate] [pallet_whitelist, Whitelist] // XCM - [pallet_xcm, XcmPallet] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] [pallet_xcm_benchmarks::fungible, pallet_xcm_benchmarks::fungible::Pallet::] [pallet_xcm_benchmarks::generic, pallet_xcm_benchmarks::generic::Pallet::] ); @@ -2064,6 +2064,8 @@ sp_api::impl_runtime_apis! 
{ use frame_system_benchmarking::Pallet as SystemBench; use frame_benchmarking::baseline::Pallet as Baseline; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + let mut list = Vec::::new(); list_benchmarks!(list, extra); @@ -2081,6 +2083,7 @@ sp_api::impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; use frame_system_benchmarking::Pallet as SystemBench; use frame_benchmarking::baseline::Pallet as Baseline; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; use sp_storage::TrackedStorageKey; use xcm::latest::prelude::*; use xcm_config::{ @@ -2097,6 +2100,33 @@ sp_api::impl_runtime_apis! { impl frame_system_benchmarking::Config for Runtime {} impl frame_benchmarking::baseline::Config for Runtime {} + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(crate::xcm_config::AssetHub::get()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported to/from AH. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Here.into()) + }, + crate::xcm_config::AssetHub::get(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay can reserve transfer native token to some random parachain. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Here.into()) + }, + Parachain(43211234).into(), + )) + } + } impl pallet_xcm_benchmarks::Config for Runtime { type XcmConfig = XcmConfig; type AccountIdConverter = LocationConverter; diff --git a/polkadot/runtime/rococo/src/xcm_config.rs b/polkadot/runtime/rococo/src/xcm_config.rs index 0814b77414f..c8f8f59dae9 100644 --- a/polkadot/runtime/rococo/src/xcm_config.rs +++ b/polkadot/runtime/rococo/src/xcm_config.rs @@ -211,11 +211,6 @@ parameter_types! { pub const FellowsBodyId: BodyId = BodyId::Technical; } -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parachain(ASSET_HUB_ID).into()); -} - /// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior /// location of this chain. pub type LocalOriginToLocation = ( @@ -269,7 +264,5 @@ impl pallet_xcm::Config for Runtime { type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } diff --git a/polkadot/runtime/test-runtime/src/xcm_config.rs b/polkadot/runtime/test-runtime/src/xcm_config.rs index 400658b1386..ae4faecf700 100644 --- a/polkadot/runtime/test-runtime/src/xcm_config.rs +++ b/polkadot/runtime/test-runtime/src/xcm_config.rs @@ -127,11 +127,6 @@ impl xcm_executor::Config for XcmConfig { type Aliasers = Nothing; } -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(xcm::latest::Junctions::Here.into()); -} - impl pallet_xcm::Config for crate::Runtime { // The config types here are entirely configurable, since the only one that is sorely needed // is `XcmExecutor`, which will be used in unit tests located in xcm-executor. 
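For downstream runtimes, the practical migration shown in the hunks above and below is to drop the benchmark-only `ReachableDest` parameter and the `type ReachableDest` associated type, and instead implement the new `pallet_xcm::benchmarking::Config` hooks. A minimal sketch for a runtime whose benchmark destination is its relay chain; the `Runtime` name and the choice of `Parent` as destination are illustrative assumptions, and the hooks left unimplemented keep their `None` defaults:

```rust
#[cfg(feature = "runtime-benchmarks")]
impl pallet_xcm::benchmarking::Config for Runtime {
	// Equivalent of the old `pub ReachableDest: Option<MultiLocation> = Some(Parent.into())`
	// parameter type (assumed destination for illustration).
	fn reachable_dest() -> Option<MultiLocation> {
		Some(Parent.into())
	}
	// `teleportable_asset_and_dest()` and `reserve_transferable_asset_and_dest()` are left at
	// their default `None` implementations here, so the teleport/reserve-transfer benchmarks
	// bail out with an `Override(Weight::MAX)` result instead of running.
}
```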
@@ -157,7 +152,5 @@ impl pallet_xcm::Config for crate::Runtime { type MaxRemoteLockConsumers = frame_support::traits::ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 1c97e54da48..d80640c016f 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1626,7 +1626,7 @@ mod benches { [pallet_whitelist, Whitelist] [pallet_asset_rate, AssetRate] // XCM - [pallet_xcm, XcmPallet] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -2145,6 +2145,7 @@ sp_api::impl_runtime_apis! { use pallet_session_benchmarking::Pallet as SessionBench; use pallet_offences_benchmarking::Pallet as OffencesBench; use pallet_election_provider_support_benchmarking::Pallet as ElectionProviderBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; use frame_system_benchmarking::Pallet as SystemBench; use pallet_nomination_pools_benchmarking::Pallet as NominationPoolsBench; @@ -2172,12 +2173,37 @@ sp_api::impl_runtime_apis! { use pallet_session_benchmarking::Pallet as SessionBench; use pallet_offences_benchmarking::Pallet as OffencesBench; use pallet_election_provider_support_benchmarking::Pallet as ElectionProviderBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; use frame_system_benchmarking::Pallet as SystemBench; use pallet_nomination_pools_benchmarking::Pallet as NominationPoolsBench; impl pallet_session_benchmarking::Config for Runtime {} impl pallet_offences_benchmarking::Config for Runtime {} impl pallet_election_provider_support_benchmarking::Config for Runtime {} + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(crate::xcm_config::AssetHub::get()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported to/from AH. + Some(( + MultiAsset { fun: Fungible(EXISTENTIAL_DEPOSIT), id: Concrete(Here.into()) }, + crate::xcm_config::AssetHub::get(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay can reserve transfer native token to some random parachain. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Here.into()) + }, + crate::Junction::Parachain(43211234).into(), + )) + } + } impl frame_system_benchmarking::Config for Runtime {} impl pallet_nomination_pools_benchmarking::Config for Runtime {} impl runtime_parachains::disputes::slashing::benchmarking::Config for Runtime {} diff --git a/polkadot/runtime/westend/src/xcm_config.rs b/polkadot/runtime/westend/src/xcm_config.rs index 64e07317fc7..9ab6470f6da 100644 --- a/polkadot/runtime/westend/src/xcm_config.rs +++ b/polkadot/runtime/westend/src/xcm_config.rs @@ -119,11 +119,6 @@ parameter_types! { pub const MaxAssetsIntoHolding: u32 = 64; } -#[cfg(feature = "runtime-benchmarks")] -parameter_types! 
{ - pub ReachableDest: Option = Some(Parachain(ASSET_HUB_ID).into()); -} - pub type TrustedTeleporters = ( xcm_builder::Case, xcm_builder::Case, @@ -265,7 +260,5 @@ impl pallet_xcm::Config for Runtime { type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } diff --git a/polkadot/xcm/pallet-xcm/Cargo.toml b/polkadot/xcm/pallet-xcm/Cargo.toml index 6b5d5e75de8..cc5d7d97c45 100644 --- a/polkadot/xcm/pallet-xcm/Cargo.toml +++ b/polkadot/xcm/pallet-xcm/Cargo.toml @@ -13,7 +13,6 @@ scale-info = { version = "2.10.0", default-features = false, features = ["derive serde = { version = "1.0.188", optional = true, features = ["derive"] } log = { version = "0.4.17", default-features = false } -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-support = { path = "../../../substrate/frame/support", default-features = false} frame-system = { path = "../../../substrate/frame/system", default-features = false} sp-core = { path = "../../../substrate/primitives/core", default-features = false} @@ -25,8 +24,12 @@ xcm = { package = "staging-xcm", path = "..", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../xcm-executor", default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../xcm-builder", default-features = false } +# marked optional, used in benchmarking +frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } +pallet-balances = { path = "../../../substrate/frame/balances", default-features = false, optional = true } + [dev-dependencies] -pallet-balances = { path = "../../../substrate/frame/balances" } +pallet-assets = { path = "../../../substrate/frame/assets" } polkadot-runtime-parachains = { path = "../../runtime/parachains" } polkadot-parachain-primitives = { path = "../../parachain" } @@ -39,6 +42,7 @@ std = [ "frame-support/std", "frame-system/std", "log/std", + "pallet-balances/std", "scale-info/std", "serde", "sp-core/std", @@ -53,6 +57,7 @@ runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-runtime-parachains/runtime-benchmarks", @@ -63,6 +68,7 @@ runtime-benchmarks = [ try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", + "pallet-assets/try-runtime", "pallet-balances/try-runtime", "polkadot-runtime-parachains/try-runtime", "sp-runtime/try-runtime", diff --git a/polkadot/xcm/pallet-xcm/src/benchmarking.rs b/polkadot/xcm/pallet-xcm/src/benchmarking.rs index 3eecbfec518..3aca24791fc 100644 --- a/polkadot/xcm/pallet-xcm/src/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm/src/benchmarking.rs @@ -16,15 +16,56 @@ use super::*; use bounded_collections::{ConstU32, WeakBoundedVec}; -use frame_benchmarking::{benchmarks, BenchmarkError, BenchmarkResult}; -use frame_support::weights::Weight; +use frame_benchmarking::{benchmarks, whitelisted_caller, BenchmarkError, BenchmarkResult}; +use frame_support::{traits::Currency, weights::Weight}; use frame_system::RawOrigin; use sp_std::prelude::*; use xcm::{latest::prelude::*, v2}; type RuntimeOrigin = ::RuntimeOrigin; +// existential 
deposit multiplier +const ED_MULTIPLIER: u32 = 100; + +/// Pallet we're benchmarking here. +pub struct Pallet(crate::Pallet); + +/// Trait that must be implemented by runtime to be able to benchmark pallet properly. +pub trait Config: crate::Config { + /// A `MultiLocation` that can be reached via `XcmRouter`. Used only in benchmarks. + /// + /// If `None`, the benchmarks that depend on a reachable destination will be skipped. + fn reachable_dest() -> Option { + None + } + + /// A `(MultiAsset, MultiLocation)` pair representing asset and the destination it can be + /// teleported to. Used only in benchmarks. + /// + /// Implementation should also make sure `dest` is reachable/connected. + /// + /// If `None`, the benchmarks that depend on this will be skipped. + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + None + } + + /// A `(MultiAsset, MultiLocation)` pair representing asset and the destination it can be + /// reserve-transferred to. Used only in benchmarks. + /// + /// Implementation should also make sure `dest` is reachable/connected. + /// + /// If `None`, the benchmarks that depend on this will be skipped. + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + None + } +} + benchmarks! { + where_clause { + where + T: pallet_balances::Config, + ::Balance: From + Into, + } send { let send_origin = T::SendXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; @@ -32,7 +73,7 @@ benchmarks! { return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) } let msg = Xcm(vec![ClearOrigin]); - let versioned_dest: VersionedMultiLocation = T::ReachableDest::get().ok_or( + let versioned_dest: VersionedMultiLocation = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), )? .into(); @@ -40,44 +81,82 @@ benchmarks! 
{ }: _>(send_origin, Box::new(versioned_dest), Box::new(versioned_msg)) teleport_assets { - let asset: MultiAsset = (Here, 10).into(); - let send_origin = - T::ExecuteXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - let origin_location = T::ExecuteXcmOrigin::try_origin(send_origin.clone()) + let (asset, destination) = T::teleportable_asset_and_dest().ok_or( + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), + )?; + + let transferred_amount = match &asset.fun { + Fungible(amount) => *amount, + _ => return Err(BenchmarkError::Stop("Benchmark asset not fungible")), + }.into(); + let assets: MultiAssets = asset.into(); + + let existential_deposit = T::ExistentialDeposit::get(); + let caller = whitelisted_caller(); + + // Give some multiple of the existential deposit + let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); + assert!(balance >= transferred_amount); + let _ = as Currency<_>>::make_free_balance_be(&caller, balance); + // verify initial balance + assert_eq!(pallet_balances::Pallet::::free_balance(&caller), balance); + + let send_origin = RawOrigin::Signed(caller.clone()); + let origin_location = T::ExecuteXcmOrigin::try_origin(send_origin.clone().into()) .map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; - if !T::XcmTeleportFilter::contains(&(origin_location, vec![asset.clone()])) { + if !T::XcmTeleportFilter::contains(&(origin_location, assets.clone().into_inner())) { return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) } let recipient = [0u8; 32]; - let versioned_dest: VersionedMultiLocation = T::ReachableDest::get().ok_or( - BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), - )? - .into(); + let versioned_dest: VersionedMultiLocation = destination.into(); let versioned_beneficiary: VersionedMultiLocation = AccountId32 { network: None, id: recipient.into() }.into(); - let versioned_assets: VersionedMultiAssets = asset.into(); - }: _>(send_origin, Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) + let versioned_assets: VersionedMultiAssets = assets.into(); + }: _>(send_origin.into(), Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) + verify { + // verify balance after transfer, decreased by transferred amount (+ maybe XCM delivery fees) + assert!(pallet_balances::Pallet::::free_balance(&caller) <= balance - transferred_amount); + } reserve_transfer_assets { - let asset: MultiAsset = (Here, 10).into(); - let send_origin = - T::ExecuteXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - let origin_location = T::ExecuteXcmOrigin::try_origin(send_origin.clone()) + let (asset, destination) = T::reserve_transferable_asset_and_dest().ok_or( + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), + )?; + + let transferred_amount = match &asset.fun { + Fungible(amount) => *amount, + _ => return Err(BenchmarkError::Stop("Benchmark asset not fungible")), + }.into(); + let assets: MultiAssets = asset.into(); + + let existential_deposit = T::ExistentialDeposit::get(); + let caller = whitelisted_caller(); + + // Give some multiple of the existential deposit + let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); + assert!(balance >= transferred_amount); + let _ = as Currency<_>>::make_free_balance_be(&caller, balance); + // verify initial balance + assert_eq!(pallet_balances::Pallet::::free_balance(&caller), 
balance); + + let send_origin = RawOrigin::Signed(caller.clone()); + let origin_location = T::ExecuteXcmOrigin::try_origin(send_origin.clone().into()) .map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; - if !T::XcmReserveTransferFilter::contains(&(origin_location, vec![asset.clone()])) { + if !T::XcmReserveTransferFilter::contains(&(origin_location, assets.clone().into_inner())) { return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) } let recipient = [0u8; 32]; - let versioned_dest: VersionedMultiLocation = T::ReachableDest::get().ok_or( - BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), - )? - .into(); + let versioned_dest: VersionedMultiLocation = destination.into(); let versioned_beneficiary: VersionedMultiLocation = AccountId32 { network: None, id: recipient.into() }.into(); - let versioned_assets: VersionedMultiAssets = asset.into(); - }: _>(send_origin, Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) + let versioned_assets: VersionedMultiAssets = assets.into(); + }: _>(send_origin.into(), Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) + verify { + // verify balance after transfer, decreased by transferred amount (+ maybe XCM delivery fees) + assert!(pallet_balances::Pallet::::free_balance(&caller) <= balance - transferred_amount); + } execute { let execute_origin = @@ -92,7 +171,7 @@ benchmarks! { }: _>(execute_origin, Box::new(versioned_msg), Weight::zero()) force_xcm_version { - let loc = T::ReachableDest::get().ok_or( + let loc = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), )?; let xcm_version = 2; @@ -101,18 +180,18 @@ benchmarks! { force_default_xcm_version {}: _(RawOrigin::Root, Some(2)) force_subscribe_version_notify { - let versioned_loc: VersionedMultiLocation = T::ReachableDest::get().ok_or( + let versioned_loc: VersionedMultiLocation = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), )? .into(); }: _(RawOrigin::Root, Box::new(versioned_loc)) force_unsubscribe_version_notify { - let loc = T::ReachableDest::get().ok_or( + let loc = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), )?; let versioned_loc: VersionedMultiLocation = loc.into(); - let _ = Pallet::::request_version_notify(loc); + let _ = crate::Pallet::::request_version_notify(loc); }: _(RawOrigin::Root, Box::new(versioned_loc)) force_suspension {}: _(RawOrigin::Root, true) @@ -122,7 +201,7 @@ benchmarks! { let loc = VersionedMultiLocation::from(MultiLocation::from(Parent)); SupportedVersion::::insert(old_version, loc, old_version); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateSupportedVersion, Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateSupportedVersion, Weight::zero()); } migrate_version_notifiers { @@ -130,22 +209,22 @@ benchmarks! 
{ let loc = VersionedMultiLocation::from(MultiLocation::from(Parent)); VersionNotifiers::::insert(old_version, loc, 0); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateVersionNotifiers, Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateVersionNotifiers, Weight::zero()); } already_notified_target { - let loc = T::ReachableDest::get().ok_or( + let loc = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(T::DbWeight::get().reads(1))), )?; let loc = VersionedMultiLocation::from(loc); let current_version = T::AdvertisedXcmVersion::get(); VersionNotifyTargets::::insert(current_version, loc, (0, Weight::zero(), current_version)); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::NotifyCurrentTargets(None), Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::NotifyCurrentTargets(None), Weight::zero()); } notify_current_targets { - let loc = T::ReachableDest::get().ok_or( + let loc = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(T::DbWeight::get().reads_writes(1, 3))), )?; let loc = VersionedMultiLocation::from(loc); @@ -153,7 +232,7 @@ benchmarks! { let old_version = current_version - 1; VersionNotifyTargets::::insert(current_version, loc, (0, Weight::zero(), old_version)); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::NotifyCurrentTargets(None), Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::NotifyCurrentTargets(None), Weight::zero()); } notify_target_migration_fail { @@ -167,7 +246,7 @@ benchmarks! { let current_version = T::AdvertisedXcmVersion::get(); VersionNotifyTargets::::insert(current_version, bad_loc, (0, Weight::zero(), current_version)); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); } migrate_version_notify_targets { @@ -176,18 +255,18 @@ benchmarks! { let loc = VersionedMultiLocation::from(MultiLocation::from(Parent)); VersionNotifyTargets::::insert(old_version, loc, (0, Weight::zero(), current_version)); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); } migrate_and_notify_old_targets { - let loc = T::ReachableDest::get().ok_or( + let loc = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(T::DbWeight::get().reads_writes(1, 3))), )?; let loc = VersionedMultiLocation::from(loc); let old_version = T::AdvertisedXcmVersion::get() - 1; VersionNotifyTargets::::insert(old_version, loc, (0, Weight::zero(), old_version)); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); } new_query { @@ -195,14 +274,14 @@ benchmarks! 
{ let timeout = 1u32.into(); let match_querier = MultiLocation::from(Here); }: { - Pallet::::new_query(responder, timeout, match_querier); + crate::Pallet::::new_query(responder, timeout, match_querier); } take_response { let responder = MultiLocation::from(Parent); let timeout = 1u32.into(); let match_querier = MultiLocation::from(Here); - let query_id = Pallet::::new_query(responder, timeout, match_querier); + let query_id = crate::Pallet::::new_query(responder, timeout, match_querier); let infos = (0 .. xcm::v3::MaxPalletsInfo::get()).map(|_| PalletInfo::new( u32::MAX, (0..xcm::v3::MaxPalletNameLen::get()).map(|_| 97u8).collect::>().try_into().unwrap(), @@ -211,10 +290,10 @@ benchmarks! { u32::MAX, u32::MAX, ).unwrap()).collect::>(); - Pallet::::expect_response(query_id, Response::PalletsInfo(infos.try_into().unwrap())); + crate::Pallet::::expect_response(query_id, Response::PalletsInfo(infos.try_into().unwrap())); }: { - as QueryHandler>::take_response(query_id); + as QueryHandler>::take_response(query_id); } impl_benchmark_test_suite!( diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index 2d969fb870c..8157620465f 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -19,7 +19,7 @@ #![cfg_attr(not(feature = "std"), no_std)] #[cfg(feature = "runtime-benchmarks")] -mod benchmarking; +pub mod benchmarking; #[cfg(test)] mod mock; #[cfg(test)] @@ -55,9 +55,9 @@ use xcm_builder::{ }; use xcm_executor::{ traits::{ - CheckSuspension, ClaimAssets, ConvertLocation, ConvertOrigin, DropAssets, MatchesFungible, - OnResponse, Properties, QueryHandler, QueryResponseStatus, VersionChangeNotifier, - WeightBounds, + AssetTransferError, CheckSuspension, ClaimAssets, ConvertLocation, ConvertOrigin, + DropAssets, MatchesFungible, OnResponse, Properties, QueryHandler, QueryResponseStatus, + TransactAsset, TransferType, VersionChangeNotifier, WeightBounds, XcmAssetTransfers, }, Assets, }; @@ -222,7 +222,7 @@ pub mod pallet { type XcmExecuteFilter: Contains<(MultiLocation, Xcm<::RuntimeCall>)>; /// Something to execute an XCM message. - type XcmExecutor: ExecuteXcm<::RuntimeCall>; + type XcmExecutor: ExecuteXcm<::RuntimeCall> + XcmAssetTransfers; /// Our XCM filter which messages to be teleported using the dedicated extrinsic must pass. type XcmTeleportFilter: Contains<(MultiLocation, Vec)>; @@ -275,12 +275,6 @@ pub mod pallet { /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; - - /// A `MultiLocation` that can be reached via `XcmRouter`. Used only in benchmarks. - /// - /// If `None`, the benchmarks that depend on a reachable destination will be skipped. - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest: Get>; } impl ExecuteControllerWeightInfo for Pallet { @@ -531,8 +525,8 @@ pub mod pallet { NoSubscription, /// The location is invalid since it already has a subscription from us. AlreadySubscribed, - /// Invalid asset for the operation. - InvalidAsset, + /// Could not check-out the assets for teleportation to the destination chain. + CannotCheckOutTeleport, /// The owner does not own (all) of the asset that they wish to do the operation on. LowBalance, /// The asset owner has too many locks on the asset. @@ -545,6 +539,16 @@ pub mod pallet { LockNotFound, /// The unlock operation cannot succeed because there are still consumers of the lock. InUse, + /// Invalid non-concrete asset. + InvalidAssetNotConcrete, + /// Invalid asset, reserve chain could not be determined for it. 
+ InvalidAssetUnknownReserve, + /// Invalid asset, do not support remote asset reserves with different fees reserves. + InvalidAssetUnsupportedReserve, + /// Too many assets with different reserve locations have been attempted for transfer. + TooManyReserves, + /// Local XCM execution of asset transfer incomplete. + LocalExecutionIncomplete, } impl From for Error { @@ -557,6 +561,15 @@ pub mod pallet { } } + impl From for Error { + fn from(e: AssetTransferError) -> Self { + match e { + AssetTransferError::NotConcrete => Error::::InvalidAssetNotConcrete, + AssetTransferError::UnknownReserve => Error::::InvalidAssetUnknownReserve, + } + } + } + /// The status of a query. #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] pub enum QueryStatus { @@ -907,11 +920,7 @@ pub mod pallet { let mut message = Xcm(vec![ WithdrawAsset(assets), SetFeesMode { jit_withdraw: true }, - InitiateTeleport { - assets: Wild(AllCounted(count)), - dest, - xcm: Xcm(vec![]), - }, + InitiateTeleport { assets: Wild(AllCounted(count)), dest, xcm: Xcm(vec![]) }, ]); T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::teleport_assets().saturating_add(w)) } @@ -954,6 +963,8 @@ pub mod pallet { match (maybe_assets, maybe_dest) { (Ok(assets), Ok(dest)) => { use sp_std::vec; + // heaviest version of locally executed XCM program: equivalent in weight to + // transfer assets to SA, reanchor them, extend XCM program, and send onward XCM let mut message = Xcm(vec![ SetFeesMode { jit_withdraw: true }, TransferReserveAsset { assets, dest, xcm: Xcm(vec![]) } @@ -1114,6 +1125,8 @@ pub mod pallet { match (maybe_assets, maybe_dest) { (Ok(assets), Ok(dest)) => { use sp_std::vec; + // heaviest version of locally executed XCM program: equivalent in weight to + // transfer assets to SA, reanchor them, extend XCM program, and send onward XCM let mut message = Xcm(vec![ SetFeesMode { jit_withdraw: true }, TransferReserveAsset { assets, dest, xcm: Xcm(vec![]) } @@ -1273,6 +1286,33 @@ impl QueryHandler for Pallet { } impl Pallet { + /// Validate `assets` to be reserve-transferred and return their reserve location. + fn validate_assets_and_find_reserve( + assets: &[MultiAsset], + dest: &MultiLocation, + ) -> Result> { + let mut reserve = None; + for asset in assets.iter() { + if let Fungible(x) = asset.fun { + // If fungible asset, ensure non-zero amount. + ensure!(!x.is_zero(), Error::::Empty); + } + let transfer_type = + T::XcmExecutor::determine_for(&asset, dest).map_err(Error::::from)?; + // Ensure asset is not teleportable to `dest`. 
+ ensure!(transfer_type != TransferType::Teleport, Error::::Filtered); + if let Some(reserve) = reserve.as_ref() { + // Ensure transfer for multiple assets uses same reserve location (only fee may have + // different reserve location) + ensure!(reserve == &transfer_type, Error::::TooManyReserves); + } else { + // asset reserve identified + reserve = Some(transfer_type); + } + } + reserve.ok_or(Error::::Empty) + } + fn do_reserve_transfer_assets( origin: OriginFor, dest: Box, @@ -1286,35 +1326,75 @@ impl Pallet { let beneficiary: MultiLocation = (*beneficiary).try_into().map_err(|()| Error::::BadVersion)?; let assets: MultiAssets = (*assets).try_into().map_err(|()| Error::::BadVersion)?; + log::trace!( + target: "xcm::pallet_xcm::do_reserve_transfer_assets", + "origin {:?}, dest {:?}, beneficiary {:?}, assets {:?}, fee-idx {:?}", + origin_location, dest, beneficiary, assets, fee_asset_item, + ); ensure!(assets.len() <= MAX_ASSETS_FOR_TRANSFER, Error::::TooManyAssets); let value = (origin_location, assets.into_inner()); ensure!(T::XcmReserveTransferFilter::contains(&value), Error::::Filtered); - let (origin_location, assets) = value; - let context = T::UniversalLocation::get(); - let fees = assets - .get(fee_asset_item as usize) - .ok_or(Error::::Empty)? - .clone() - .reanchored(&dest, context) - .map_err(|_| Error::::CannotReanchor)?; - let max_assets = assets.len() as u32; - let assets: MultiAssets = assets.into(); - let xcm = Xcm(vec![ - BuyExecution { fees, weight_limit }, - DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }, - ]); - let mut message = Xcm(vec![ - SetFeesMode { jit_withdraw: true }, - TransferReserveAsset { assets, dest, xcm }, - ]); - let weight = - T::Weigher::weight(&mut message).map_err(|()| Error::::UnweighableMessage)?; - let hash = message.using_encoded(sp_io::hashing::blake2_256); - let outcome = - T::XcmExecutor::execute_xcm_in_credit(origin_location, message, hash, weight, weight); - Self::deposit_event(Event::Attempted { outcome }); - Ok(()) + let (origin_location, mut assets) = value; + + if fee_asset_item as usize >= assets.len() { + return Err(Error::::Empty.into()) + } + let fees = assets.swap_remove(fee_asset_item as usize); + let fees_transfer_type = + T::XcmExecutor::determine_for(&fees, &dest).map_err(Error::::from)?; + let assets_transfer_type = if assets.is_empty() { + // Single asset to transfer (one used for fees where transfer type is determined above). + ensure!(fees_transfer_type != TransferType::Teleport, Error::::Filtered); + fees_transfer_type + } else { + // Find reserve for non-fee assets. + Self::validate_assets_and_find_reserve(&assets, &dest)? + }; + + // local and remote XCM programs to potentially handle fees separately + let separate_fees_instructions: Option<(Xcm<::RuntimeCall>, Xcm<()>)>; + if fees_transfer_type == assets_transfer_type { + // Same reserve location (fees not teleportable), we can batch together fees and assets + // in same reserve-based-transfer. + assets.push(fees.clone()); + // no need for custom fees instructions, fees are batched with assets + separate_fees_instructions = None; + } else { + // Disallow _remote reserves_ unless assets & fees have same remote reserve (covered by + // branch above). The reason for this is that we'd need to send XCMs to separate chains + // with no guarantee of delivery order on final destination; therefore we cannot + // guarantee to have fees in place on final destination chain to pay for assets + // transfer. 
+ ensure!( + !matches!(assets_transfer_type, TransferType::RemoteReserve(_)), + Error::::InvalidAssetUnsupportedReserve + ); + let fees = fees.clone(); + let weight_limit = weight_limit.clone(); + // build fees transfer instructions to be added to assets transfers XCM programs + separate_fees_instructions = Some(match fees_transfer_type { + TransferType::LocalReserve => + Self::local_reserve_fees_instructions(dest, fees, weight_limit)?, + TransferType::DestinationReserve => + Self::destination_reserve_fees_instructions(dest, fees, weight_limit)?, + TransferType::Teleport => + Self::teleport_fees_instructions(dest, fees, weight_limit)?, + TransferType::RemoteReserve(_) => + return Err(Error::::InvalidAssetUnsupportedReserve.into()), + }); + }; + + Self::build_and_execute_xcm_transfer_type( + origin_location, + dest, + beneficiary, + assets, + assets_transfer_type, + fees, + separate_fees_instructions, + weight_limit, + ) } fn do_teleport_assets( @@ -1335,31 +1415,384 @@ impl Pallet { let value = (origin_location, assets.into_inner()); ensure!(T::XcmTeleportFilter::contains(&value), Error::::Filtered); let (origin_location, assets) = value; + for asset in assets.iter() { + let transfer_type = + T::XcmExecutor::determine_for(asset, &dest).map_err(Error::::from)?; + ensure!(matches!(transfer_type, TransferType::Teleport), Error::::Filtered); + } + let fees = assets.get(fee_asset_item as usize).ok_or(Error::::Empty)?.clone(); + + Self::build_and_execute_xcm_transfer_type( + origin_location, + dest, + beneficiary, + assets, + TransferType::Teleport, + fees, + None, + weight_limit, + ) + } + + fn build_and_execute_xcm_transfer_type( + origin: MultiLocation, + dest: MultiLocation, + beneficiary: MultiLocation, + assets: Vec, + transfer_type: TransferType, + fees: MultiAsset, + separate_fees_instructions: Option<(Xcm<::RuntimeCall>, Xcm<()>)>, + weight_limit: WeightLimit, + ) -> DispatchResult { + log::trace!( + target: "xcm::pallet_xcm::build_and_execute_xcm_transfer_type", + "origin {:?}, dest {:?}, beneficiary {:?}, assets {:?}, transfer_type {:?}, \ + fees {:?}, fees_xcm: {:?}, weight_limit: {:?}", + origin, dest, beneficiary, assets, transfer_type, fees, separate_fees_instructions, weight_limit, + ); + let (mut local_xcm, remote_xcm) = match transfer_type { + TransferType::LocalReserve => { + let (local, remote) = Self::local_reserve_transfer_programs( + dest, + beneficiary, + assets, + fees, + separate_fees_instructions, + weight_limit, + )?; + (local, Some(remote)) + }, + TransferType::DestinationReserve => { + let (local, remote) = Self::destination_reserve_transfer_programs( + dest, + beneficiary, + assets, + fees, + separate_fees_instructions, + weight_limit, + )?; + (local, Some(remote)) + }, + TransferType::RemoteReserve(reserve) => ( + Self::remote_reserve_transfer_program( + reserve, + dest, + beneficiary, + assets, + fees, + weight_limit, + )?, + None, + ), + TransferType::Teleport => ( + Self::teleport_assets_program(dest, beneficiary, assets, fees, weight_limit)?, + None, + ), + }; + let weight = + T::Weigher::weight(&mut local_xcm).map_err(|()| Error::::UnweighableMessage)?; + let hash = local_xcm.using_encoded(sp_io::hashing::blake2_256); + let outcome = + T::XcmExecutor::execute_xcm_in_credit(origin, local_xcm, hash, weight, weight); + Self::deposit_event(Event::Attempted { outcome: outcome.clone() }); + if let Some(remote_xcm) = remote_xcm { + outcome.ensure_complete().map_err(|_| Error::::LocalExecutionIncomplete)?; + + let (ticket, price) = validate_send::(dest, 
remote_xcm.clone()) + .map_err(Error::::from)?; + if origin != Here.into_location() { + Self::charge_fees(origin, price).map_err(|_| Error::::FeesNotMet)?; + } + let message_id = T::XcmRouter::deliver(ticket).map_err(Error::::from)?; + + let e = Event::Sent { origin, destination: dest, message: remote_xcm, message_id }; + Self::deposit_event(e); + } + Ok(()) + } + + fn local_reserve_fees_instructions( + dest: MultiLocation, + fees: MultiAsset, + weight_limit: WeightLimit, + ) -> Result<(Xcm<::RuntimeCall>, Xcm<()>), Error> { let context = T::UniversalLocation::get(); - let fees = assets - .get(fee_asset_item as usize) - .ok_or(Error::::Empty)? + let reanchored_fees = fees .clone() .reanchored(&dest, context) .map_err(|_| Error::::CannotReanchor)?; - let max_assets = assets.len() as u32; + + let local_execute_xcm = Xcm(vec![ + // move `fees` to `dest`s local sovereign account + TransferAsset { assets: fees.into(), beneficiary: dest }, + ]); + let xcm_on_dest = Xcm(vec![ + // let (dest) chain know `fees` are in its SA on reserve + ReserveAssetDeposited(reanchored_fees.clone().into()), + // buy exec using `fees` in holding deposited in above instruction + BuyExecution { fees: reanchored_fees, weight_limit }, + ]); + Ok((local_execute_xcm, xcm_on_dest)) + } + + fn local_reserve_transfer_programs( + dest: MultiLocation, + beneficiary: MultiLocation, + assets: Vec, + fees: MultiAsset, + separate_fees_instructions: Option<(Xcm<::RuntimeCall>, Xcm<()>)>, + weight_limit: WeightLimit, + ) -> Result<(Xcm<::RuntimeCall>, Xcm<()>), Error> { + // max assets is `assets` (+ potentially separately handled fee) + let max_assets = + assets.len() as u32 + separate_fees_instructions.as_ref().map(|_| 1).unwrap_or(0); + let assets: MultiAssets = assets.into(); + let context = T::UniversalLocation::get(); + let mut reanchored_assets = assets.clone(); + reanchored_assets + .reanchor(&dest, context) + .map_err(|_| Error::::CannotReanchor)?; + + // fees are either handled through dedicated instructions, or batched together with assets + let fees_already_handled = separate_fees_instructions.is_some(); + let (fees_local_xcm, fees_remote_xcm) = separate_fees_instructions + .map(|(local, remote)| (local.into_inner(), remote.into_inner())) + .unwrap_or_default(); + + // start off with any necessary local fees specific instructions + let mut local_execute_xcm = fees_local_xcm; + // move `assets` to `dest`s local sovereign account + local_execute_xcm.push(TransferAsset { assets, beneficiary: dest }); + + // on destination chain, start off with custom fee instructions + let mut xcm_on_dest = fees_remote_xcm; + // continue with rest of assets + xcm_on_dest.extend_from_slice(&[ + // let (dest) chain know assets are in its SA on reserve + ReserveAssetDeposited(reanchored_assets), + // following instructions are not exec'ed on behalf of origin chain anymore + ClearOrigin, + ]); + if !fees_already_handled { + // no custom fees instructions, they are batched together with `assets` transfer; + // BuyExecution happens after receiving all `assets` + let reanchored_fees = + fees.reanchored(&dest, context).map_err(|_| Error::::CannotReanchor)?; + // buy execution using `fees` batched together with above `reanchored_assets` + xcm_on_dest.push(BuyExecution { fees: reanchored_fees, weight_limit }); + } + // deposit all remaining assets in holding to `beneficiary` location + xcm_on_dest.push(DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }); + + Ok((Xcm(local_execute_xcm), Xcm(xcm_on_dest))) + } + + fn 
destination_reserve_fees_instructions( + dest: MultiLocation, + fees: MultiAsset, + weight_limit: WeightLimit, + ) -> Result<(Xcm<::RuntimeCall>, Xcm<()>), Error> { + let context = T::UniversalLocation::get(); + let reanchored_fees = fees + .clone() + .reanchored(&dest, context) + .map_err(|_| Error::::CannotReanchor)?; + let fees: MultiAssets = fees.into(); + + let local_execute_xcm = Xcm(vec![ + // withdraw reserve-based fees (derivatives) + WithdrawAsset(fees.clone()), + // burn derivatives + BurnAsset(fees), + ]); + let xcm_on_dest = Xcm(vec![ + // withdraw `fees` from origin chain's sovereign account + WithdrawAsset(reanchored_fees.clone().into()), + // buy exec using `fees` in holding withdrawn in above instruction + BuyExecution { fees: reanchored_fees, weight_limit }, + ]); + Ok((local_execute_xcm, xcm_on_dest)) + } + + fn destination_reserve_transfer_programs( + dest: MultiLocation, + beneficiary: MultiLocation, + assets: Vec, + fees: MultiAsset, + separate_fees_instructions: Option<(Xcm<::RuntimeCall>, Xcm<()>)>, + weight_limit: WeightLimit, + ) -> Result<(Xcm<::RuntimeCall>, Xcm<()>), Error> { + // max assets is `assets` (+ potentially separately handled fee) + let max_assets = + assets.len() as u32 + separate_fees_instructions.as_ref().map(|_| 1).unwrap_or(0); let assets: MultiAssets = assets.into(); - let xcm = Xcm(vec![ + let context = T::UniversalLocation::get(); + let mut reanchored_assets = assets.clone(); + reanchored_assets + .reanchor(&dest, context) + .map_err(|_| Error::::CannotReanchor)?; + + // fees are either handled through dedicated instructions, or batched together with assets + let fees_already_handled = separate_fees_instructions.is_some(); + let (fees_local_xcm, fees_remote_xcm) = separate_fees_instructions + .map(|(local, remote)| (local.into_inner(), remote.into_inner())) + .unwrap_or_default(); + + // start off with any necessary local fees specific instructions + let mut local_execute_xcm = fees_local_xcm; + // continue with rest of assets + local_execute_xcm.extend_from_slice(&[ + // withdraw reserve-based assets + WithdrawAsset(assets.clone()), + // burn reserve-based assets + BurnAsset(assets), + ]); + + // on destination chain, start off with custom fee instructions + let mut xcm_on_dest = fees_remote_xcm; + // continue with rest of assets + xcm_on_dest.extend_from_slice(&[ + // withdraw `assets` from origin chain's sovereign account + WithdrawAsset(reanchored_assets), + // following instructions are not exec'ed on behalf of origin chain anymore + ClearOrigin, + ]); + if !fees_already_handled { + // no custom fees instructions, they are batched together with `assets` transfer; + // BuyExecution happens after receiving all `assets` + let reanchored_fees = + fees.reanchored(&dest, context).map_err(|_| Error::::CannotReanchor)?; + // buy execution using `fees` batched together with above `reanchored_assets` + xcm_on_dest.push(BuyExecution { fees: reanchored_fees, weight_limit }); + } + // deposit all remaining assets in holding to `beneficiary` location + xcm_on_dest.push(DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }); + + Ok((Xcm(local_execute_xcm), Xcm(xcm_on_dest))) + } + + // function assumes fees and assets have the same remote reserve + fn remote_reserve_transfer_program( + reserve: MultiLocation, + dest: MultiLocation, + beneficiary: MultiLocation, + assets: Vec, + fees: MultiAsset, + weight_limit: WeightLimit, + ) -> Result::RuntimeCall>, Error> { + let max_assets = assets.len() as u32; + let context = 
T::UniversalLocation::get(); + // we spend up to half of fees for execution on reserve and other half for execution on + // destination + let (fees_half_1, fees_half_2) = Self::halve_fees(fees)?; + // identifies fee item as seen by `reserve` - to be used at reserve chain + let reserve_fees = fees_half_1 + .reanchored(&reserve, context) + .map_err(|_| Error::::CannotReanchor)?; + // identifies fee item as seen by `dest` - to be used at destination chain + let dest_fees = + fees_half_2.reanchored(&dest, context).map_err(|_| Error::::CannotReanchor)?; + // identifies `dest` as seen by `reserve` + let dest = dest.reanchored(&reserve, context).map_err(|_| Error::::CannotReanchor)?; + // xcm to be executed at dest + let xcm_on_dest = Xcm(vec![ + BuyExecution { fees: dest_fees, weight_limit: weight_limit.clone() }, + DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }, + ]); + // xcm to be executed on reserve + let xcm_on_reserve = Xcm(vec![ + BuyExecution { fees: reserve_fees, weight_limit }, + DepositReserveAsset { assets: Wild(AllCounted(max_assets)), dest, xcm: xcm_on_dest }, + ]); + Ok(Xcm(vec![ + WithdrawAsset(assets.into()), + InitiateReserveWithdraw { + assets: Wild(AllCounted(max_assets)), + reserve, + xcm: xcm_on_reserve, + }, + ])) + } + + fn teleport_fees_instructions( + dest: MultiLocation, + fees: MultiAsset, + weight_limit: WeightLimit, + ) -> Result<(Xcm<::RuntimeCall>, Xcm<()>), Error> { + let context = T::UniversalLocation::get(); + let reanchored_fees = fees + .clone() + .reanchored(&dest, context) + .map_err(|_| Error::::CannotReanchor)?; + + // XcmContext irrelevant in teleports checks + let dummy_context = + XcmContext { origin: None, message_id: Default::default(), topic: None }; + // We should check that the asset can actually be teleported out (for this to + // be in error, there would need to be an accounting violation by ourselves, + // so it's unlikely, but we don't want to allow that kind of bug to leak into + // a trusted chain. 
+ ::AssetTransactor::can_check_out( + &dest, + &fees, + &dummy_context, + ) + .map_err(|_| Error::::CannotCheckOutTeleport)?; + ::AssetTransactor::check_out( + &dest, + &fees, + &dummy_context, + ); + + let fees: MultiAssets = fees.into(); + let local_execute_xcm = Xcm(vec![ + // withdraw fees + WithdrawAsset(fees.clone()), + // burn fees + BurnAsset(fees), + ]); + let xcm_on_dest = Xcm(vec![ + // (dest) chain receive teleported assets burned on origin chain + ReceiveTeleportedAsset(reanchored_fees.clone().into()), + // buy exec using `fees` in holding received in above instruction + BuyExecution { fees: reanchored_fees, weight_limit }, + ]); + Ok((local_execute_xcm, xcm_on_dest)) + } + + fn teleport_assets_program( + dest: MultiLocation, + beneficiary: MultiLocation, + assets: Vec, + mut fees: MultiAsset, + weight_limit: WeightLimit, + ) -> Result::RuntimeCall>, Error> { + let context = T::UniversalLocation::get(); + fees.reanchor(&dest, context).map_err(|_| Error::::CannotReanchor)?; + let max_assets = assets.len() as u32; + let xcm_on_dest = Xcm(vec![ BuyExecution { fees, weight_limit }, DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }, ]); - let mut message = Xcm(vec![ - WithdrawAsset(assets), + Ok(Xcm(vec![ + WithdrawAsset(assets.into()), SetFeesMode { jit_withdraw: true }, - InitiateTeleport { assets: Wild(AllCounted(max_assets)), dest, xcm }, - ]); - let weight = - T::Weigher::weight(&mut message).map_err(|()| Error::::UnweighableMessage)?; - let hash = message.using_encoded(sp_io::hashing::blake2_256); - let outcome = - T::XcmExecutor::execute_xcm_in_credit(origin_location, message, hash, weight, weight); - Self::deposit_event(Event::Attempted { outcome }); - Ok(()) + InitiateTeleport { assets: Wild(AllCounted(max_assets)), dest, xcm: xcm_on_dest }, + ])) + } + + /// Halve `fees` fungible amount. 
+ pub(crate) fn halve_fees(fees: MultiAsset) -> Result<(MultiAsset, MultiAsset), Error> { + match fees.fun { + Fungible(amount) => { + let fee1 = amount.saturating_div(2); + let fee2 = amount.saturating_sub(fee1); + ensure!(fee1 > 0, Error::::FeesNotMet); + ensure!(fee2 > 0, Error::::FeesNotMet); + Ok((MultiAsset::from((fees.id, fee1)), MultiAsset::from((fees.id, fee2)))) + }, + NonFungible(_) => Err(Error::::FeesNotMet), + } } /// Will always make progress, and will do its best not to use much more than `weight_cutoff` diff --git a/polkadot/xcm/pallet-xcm/src/mock.rs b/polkadot/xcm/pallet-xcm/src/mock.rs index 3b41ad90ec9..026838993f1 100644 --- a/polkadot/xcm/pallet-xcm/src/mock.rs +++ b/polkadot/xcm/pallet-xcm/src/mock.rs @@ -17,7 +17,9 @@ use codec::Encode; use frame_support::{ construct_runtime, match_types, parameter_types, - traits::{ConstU32, Everything, EverythingBut, Nothing}, + traits::{ + AsEnsureOriginWithArg, ConstU128, ConstU32, Equals, Everything, EverythingBut, Nothing, + }, weights::Weight, }; use frame_system::EnsureRoot; @@ -32,11 +34,15 @@ use xcm::prelude::*; use xcm_builder::{ AccountId32Aliases, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, Case, ChildParachainAsNative, ChildParachainConvertsVia, - ChildSystemParachainAsSuperuser, CurrencyAdapter as XcmCurrencyAdapter, FixedRateOfFungible, - FixedWeightBounds, IsConcrete, SignedAccountId32AsNative, SignedToAccountId32, + ChildSystemParachainAsSuperuser, CurrencyAdapter as XcmCurrencyAdapter, DescribeAllTerminal, + FixedRateOfFungible, FixedWeightBounds, FungiblesAdapter, HashedDescription, IsConcrete, + MatchedConvertedConcreteId, NoChecking, SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, XcmFeeManagerFromComponents, XcmFeeToAccount, }; -use xcm_executor::XcmExecutor; +use xcm_executor::{ + traits::{Identity, JustTry}, + XcmExecutor, +}; use crate::{self as pallet_xcm, TestWeightInfo}; @@ -137,6 +143,7 @@ construct_runtime!( { System: frame_system::{Pallet, Call, Storage, Config, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Assets: pallet_assets::{Pallet, Call, Storage, Config, Event}, ParasOrigin: origin::{Pallet, Origin}, XcmPallet: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config}, TestNotifier: pallet_test_notifier::{Pallet, Call, Event}, @@ -179,13 +186,13 @@ impl SendXcm for TestSendXcmErrX8 { type Ticket = (MultiLocation, Xcm<()>); fn validate( dest: &mut Option, - msg: &mut Option>, + _: &mut Option>, ) -> SendResult<(MultiLocation, Xcm<()>)> { - let (dest, msg) = (dest.take().unwrap(), msg.take().unwrap()); - if dest.len() == 8 { + if dest.as_ref().unwrap().len() == 8 { + dest.take(); Err(SendError::Transport("Destination location full")) } else { - Ok(((dest, msg), MultiAssets::new())) + Err(SendError::NotApplicable) } } fn deliver(pair: (MultiLocation, Xcm<()>)) -> Result { @@ -280,18 +287,135 @@ impl pallet_balances::Config for Test { type MaxFreezes = ConstU32<0>; } +#[cfg(feature = "runtime-benchmarks")] +/// Simple conversion of `u32` into an `AssetId` for use in benchmarking. 
+pub struct XcmBenchmarkHelper; +#[cfg(feature = "runtime-benchmarks")] +impl pallet_assets::BenchmarkHelper for XcmBenchmarkHelper { + fn create_asset_id_parameter(id: u32) -> MultiLocation { + MultiLocation { parents: 1, interior: X1(Parachain(id)) } + } +} + +impl pallet_assets::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Balance = Balance; + type AssetId = MultiLocation; + type AssetIdParameter = MultiLocation; + type Currency = Balances; + type CreateOrigin = AsEnsureOriginWithArg>; + type ForceOrigin = EnsureRoot; + type AssetDeposit = ConstU128<1>; + type AssetAccountDeposit = ConstU128<10>; + type MetadataDepositBase = ConstU128<1>; + type MetadataDepositPerByte = ConstU128<1>; + type ApprovalDeposit = ConstU128<1>; + type StringLimit = ConstU32<50>; + type Freezer = (); + type WeightInfo = (); + type CallbackHandle = (); + type Extra = (); + type RemoveItemsLimit = ConstU32<5>; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = XcmBenchmarkHelper; +} + +// This child parachain is a system parachain trusted to teleport native token. +pub const SOME_SYSTEM_PARA: u32 = 1001; + +// This child parachain acts as trusted reserve for its assets in tests. +// USDT allowed to teleport to/from here. +pub const FOREIGN_ASSET_RESERVE_PARA_ID: u32 = 2001; +// Inner junction of reserve asset on `FOREIGN_ASSET_RESERVE_PARA_ID`. +pub const FOREIGN_ASSET_INNER_JUNCTION: Junction = GeneralIndex(1234567); + +// This child parachain acts as trusted reserve for say.. USDC that can be used for fees. +pub const USDC_RESERVE_PARA_ID: u32 = 2002; +// Inner junction of reserve asset on `USDC_RESERVE_PARA_ID`. +pub const USDC_INNER_JUNCTION: Junction = PalletInstance(42); + +// This child parachain is a trusted teleporter for say.. USDT (T from Teleport :)). +// We'll use USDT in tests that teleport fees. +pub const USDT_PARA_ID: u32 = 2003; + +// This child parachain is not configured as trusted reserve or teleport location for any assets. +pub const OTHER_PARA_ID: u32 = 2009; + parameter_types! 
{ pub const RelayLocation: MultiLocation = Here.into_location(); + pub const NativeAsset: MultiAsset = MultiAsset { + fun: Fungible(10), + id: Concrete(Here.into_location()), + }; + pub const SystemParachainLocation: MultiLocation = MultiLocation { + parents: 0, + interior: X1(Parachain(SOME_SYSTEM_PARA)) + }; + pub const ForeignReserveLocation: MultiLocation = MultiLocation { + parents: 0, + interior: X1(Parachain(FOREIGN_ASSET_RESERVE_PARA_ID)) + }; + pub const ForeignAsset: MultiAsset = MultiAsset { + fun: Fungible(10), + id: Concrete(MultiLocation { + parents: 0, + interior: X2(Parachain(FOREIGN_ASSET_RESERVE_PARA_ID), FOREIGN_ASSET_INNER_JUNCTION), + }), + }; + pub const UsdcReserveLocation: MultiLocation = MultiLocation { + parents: 0, + interior: X1(Parachain(USDC_RESERVE_PARA_ID)) + }; + pub const Usdc: MultiAsset = MultiAsset { + fun: Fungible(10), + id: Concrete(MultiLocation { + parents: 0, + interior: X2(Parachain(USDC_RESERVE_PARA_ID), USDC_INNER_JUNCTION), + }), + }; + pub const UsdtTeleportLocation: MultiLocation = MultiLocation { + parents: 0, + interior: X1(Parachain(USDT_PARA_ID)) + }; + pub const Usdt: MultiAsset = MultiAsset { + fun: Fungible(10), + id: Concrete(MultiLocation { + parents: 0, + interior: X1(Parachain(USDT_PARA_ID)), + }), + }; pub const AnyNetwork: Option = None; pub UniversalLocation: InteriorMultiLocation = Here; pub UnitWeightCost: u64 = 1_000; + pub CheckingAccount: AccountId = XcmPallet::check_account(); } -pub type SovereignAccountOf = - (ChildParachainConvertsVia, AccountId32Aliases); +pub type SovereignAccountOf = ( + ChildParachainConvertsVia, + AccountId32Aliases, + HashedDescription, +); -pub type LocalAssetTransactor = - XcmCurrencyAdapter, SovereignAccountOf, AccountId, ()>; +pub type ForeignAssetsConvertedConcreteId = MatchedConvertedConcreteId< + MultiLocation, + Balance, + // Excludes relay/parent chain currency + EverythingBut<(Equals,)>, + Identity, + JustTry, +>; + +pub type AssetTransactors = ( + XcmCurrencyAdapter, SovereignAccountOf, AccountId, ()>, + FungiblesAdapter< + Assets, + ForeignAssetsConvertedConcreteId, + SovereignAccountOf, + AccountId, + NoChecking, + CheckingAccount, + >, +); type LocalOriginConverter = ( SovereignSignedViaLocation, @@ -303,7 +427,12 @@ type LocalOriginConverter = ( parameter_types! 
{ pub const BaseXcmWeight: Weight = Weight::from_parts(1_000, 1_000); pub CurrencyPerSecondPerByte: (AssetId, u128, u128) = (Concrete(RelayLocation::get()), 1, 1); - pub TrustedAssets: (MultiAssetFilter, MultiLocation) = (All.into(), Here.into()); + pub TrustedLocal: (MultiAssetFilter, MultiLocation) = (All.into(), Here.into()); + pub TrustedSystemPara: (MultiAssetFilter, MultiLocation) = (NativeAsset::get().into(), SystemParachainLocation::get()); + pub TrustedUsdt: (MultiAssetFilter, MultiLocation) = (Usdt::get().into(), UsdtTeleportLocation::get()); + pub TeleportUsdtToForeign: (MultiAssetFilter, MultiLocation) = (Usdt::get().into(), ForeignReserveLocation::get()); + pub TrustedForeign: (MultiAssetFilter, MultiLocation) = (ForeignAsset::get().into(), ForeignReserveLocation::get()); + pub TrustedUsdc: (MultiAssetFilter, MultiLocation) = (Usdc::get().into(), UsdcReserveLocation::get()); pub const MaxInstructions: u32 = 100; pub const MaxAssetsIntoHolding: u32 = 64; pub XcmFeesTargetAccount: AccountId = AccountId::new([167u8; 32]); @@ -323,14 +452,21 @@ pub type Barrier = ( AllowSubscriptionsFrom, ); +pub type XcmRouter = (TestPaidForPara3000SendXcm, TestSendXcmErrX8, TestSendXcm); + pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { type RuntimeCall = RuntimeCall; - type XcmSender = (TestPaidForPara3000SendXcm, TestSendXcm); - type AssetTransactor = LocalAssetTransactor; + type XcmSender = XcmRouter; + type AssetTransactor = AssetTransactors; type OriginConverter = LocalOriginConverter; - type IsReserve = (); - type IsTeleporter = Case; + type IsReserve = (Case, Case); + type IsTeleporter = ( + Case, + Case, + Case, + Case, + ); type UniversalLocation = UniversalLocation; type Barrier = Barrier; type Weigher = FixedWeightBounds; @@ -360,15 +496,10 @@ parameter_types! { pub static AdvertisedXcmVersion: pallet_xcm::XcmVersion = 3; } -#[cfg(feature = "runtime-benchmarks")] -parameter_types! 
{ - pub ReachableDest: Option = Some(Parachain(1000).into()); -} - impl pallet_xcm::Config for Test { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; - type XcmRouter = (TestSendXcmErrX8, TestPaidForPara3000SendXcm, TestSendXcm); + type XcmRouter = XcmRouter; type ExecuteXcmOrigin = xcm_builder::EnsureXcmOrigin; type XcmExecuteFilter = Everything; type XcmExecutor = XcmExecutor; @@ -380,6 +511,7 @@ impl pallet_xcm::Config for Test { type RuntimeCall = RuntimeCall; const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; type AdvertisedXcmVersion = AdvertisedXcmVersion; + type AdminOrigin = EnsureRoot; type TrustedLockers = (); type SovereignAccountOf = AccountId32Aliases<(), AccountId32>; type Currency = Balances; @@ -388,9 +520,6 @@ impl pallet_xcm::Config for Test { type MaxRemoteLockConsumers = frame_support::traits::ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; - type AdminOrigin = EnsureRoot; } impl origin::Config for Test {} @@ -401,6 +530,24 @@ impl pallet_test_notifier::Config for Test { type RuntimeCall = RuntimeCall; } +#[cfg(feature = "runtime-benchmarks")] +impl super::benchmarking::Config for Test { + fn reachable_dest() -> Option { + Some(Parachain(1000).into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + Some((NativeAsset::get(), SystemParachainLocation::get())) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + Some(( + MultiAsset { fun: Fungible(10), id: Concrete(Here.into_location()) }, + Parachain(OTHER_PARA_ID).into(), + )) + } +} + pub(crate) fn last_event() -> RuntimeEvent { System::events().pop().expect("RuntimeEvent expected").event } @@ -416,10 +563,10 @@ pub(crate) fn buy_execution(fees: impl Into) -> Instruction { pub(crate) fn buy_limited_execution( fees: impl Into, - weight: Weight, + weight_limit: WeightLimit, ) -> Instruction { use xcm::latest::prelude::*; - BuyExecution { fees: fees.into(), weight_limit: Limited(weight) } + BuyExecution { fees: fees.into(), weight_limit } } pub(crate) fn new_test_ext_with_balances( diff --git a/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs new file mode 100644 index 00000000000..b02b0fd33c3 --- /dev/null +++ b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs @@ -0,0 +1,1405 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
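The remote-reserve transfer tests added below split the user-provided fees with the new `XcmPallet::halve_fees` helper: one half pays for execution on the reserve chain, the other on the final destination. A minimal, standalone sketch of that halving logic (using a plain `u128` amount instead of the pallet's `MultiAsset`, so the names here are illustrative only):

```rust
// Sketch of the fee-halving idea: split an amount into two non-zero halves so
// that each hop of a two-hop transfer can pay for its own execution.
fn halve_amount(amount: u128) -> Option<(u128, u128)> {
    let half1 = amount.saturating_div(2);
    let half2 = amount.saturating_sub(half1);
    // If either half rounds down to zero, the split cannot cover both hops.
    if half1 == 0 || half2 == 0 {
        return None;
    }
    Some((half1, half2))
}

fn main() {
    assert_eq!(halve_amount(5), Some((2, 3)));
    assert_eq!(halve_amount(1), None); // too small to pay fees twice
}
```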
+ +#![cfg(test)] + +use crate::{ + mock::*, + tests::{ALICE, BOB, FEE_AMOUNT, INITIAL_BALANCE, SEND_AMOUNT}, +}; +use frame_support::{ + assert_ok, + traits::{tokens::fungibles::Inspect, Currency}, + weights::Weight, +}; +use polkadot_parachain_primitives::primitives::Id as ParaId; +use sp_runtime::{traits::AccountIdConversion, DispatchError, ModuleError}; +use xcm::prelude::*; +use xcm_executor::traits::ConvertLocation; + +// Helper function to deduplicate testing different teleport types. +fn do_test_and_verify_teleport_assets( + expected_beneficiary: MultiLocation, + call: Call, + expected_weight_limit: WeightLimit, +) { + let balances = vec![ + (ALICE, INITIAL_BALANCE), + (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), + ]; + new_test_ext_with_balances(balances).execute_with(|| { + let weight = BaseXcmWeight::get() * 3; + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); + // call extrinsic + call(); + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); + assert_eq!( + sent_xcm(), + vec![( + RelayLocation::get().into(), + Xcm(vec![ + ReceiveTeleportedAsset((Here, SEND_AMOUNT).into()), + ClearOrigin, + buy_limited_execution((Here, SEND_AMOUNT), expected_weight_limit), + DepositAsset { + assets: AllCounted(1).into(), + beneficiary: expected_beneficiary + }, + ]), + )] + ); + let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1); + let _check_v2_ok: xcm::v2::Xcm<()> = versioned_sent.try_into().unwrap(); + assert_eq!( + last_event(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) + ); + }); +} + +/// Test `teleport_assets` +/// +/// Asserts that the sender's balance is decreased as a result of execution of +/// local effects. +#[test] +fn teleport_assets_works() { + let beneficiary: MultiLocation = AccountId32 { network: None, id: BOB.into() }.into(); + do_test_and_verify_teleport_assets( + beneficiary, + || { + assert_ok!(XcmPallet::teleport_assets( + RuntimeOrigin::signed(ALICE), + Box::new(RelayLocation::get().into()), + Box::new(beneficiary.into()), + Box::new((Here, SEND_AMOUNT).into()), + 0, + )); + }, + Unlimited, + ); +} + +/// Test `limited_teleport_assets` +/// +/// Asserts that the sender's balance is decreased as a result of execution of +/// local effects. +#[test] +fn limited_teleport_assets_works() { + let beneficiary: MultiLocation = AccountId32 { network: None, id: BOB.into() }.into(); + let weight_limit = WeightLimit::Limited(Weight::from_parts(5000, 5000)); + let expected_weight_limit = weight_limit.clone(); + do_test_and_verify_teleport_assets( + beneficiary, + || { + assert_ok!(XcmPallet::limited_teleport_assets( + RuntimeOrigin::signed(ALICE), + Box::new(RelayLocation::get().into()), + Box::new(beneficiary.into()), + Box::new((Here, SEND_AMOUNT).into()), + 0, + weight_limit, + )); + }, + expected_weight_limit, + ); +} + +/// Test `reserve_transfer_assets_with_paid_router_works` +/// +/// Asserts that the sender's balance is decreased and the beneficiary's balance +/// is increased. Verifies the correct message is sent and event is emitted. +/// Verifies that XCM router fees (`SendXcm::validate` -> `MultiAssets`) are withdrawn from correct +/// user account and deposited to a correct target account (`XcmFeesTargetAccount`). 
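+/// A rough sketch of the balance movements asserted below (an illustrative summary of the
+/// checks in this test, not additional behaviour):
+/// ```nocompile
+/// user account:         - SEND_AMOUNT - Para3000PaymentAmount   (transfer + router delivery fee)
+/// Para3000 sovereign:   + SEND_AMOUNT                           (reserve-backed deposit)
+/// XcmFeesTargetAccount: + Para3000PaymentAmount                 (collected delivery fee)
+/// ```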
+#[test] +fn reserve_transfer_assets_with_paid_router_works() { + let user_account = AccountId::from(XCM_FEES_NOT_WAIVED_USER_ACCOUNT); + let paid_para_id = Para3000::get(); + let balances = vec![ + (user_account.clone(), INITIAL_BALANCE), + (ParaId::from(paid_para_id).into_account_truncating(), INITIAL_BALANCE), + (XcmFeesTargetAccount::get(), INITIAL_BALANCE), + ]; + new_test_ext_with_balances(balances).execute_with(|| { + let xcm_router_fee_amount = Para3000PaymentAmount::get(); + let weight = BaseXcmWeight::get(); + let dest: MultiLocation = + Junction::AccountId32 { network: None, id: user_account.clone().into() }.into(); + assert_eq!(Balances::total_balance(&user_account), INITIAL_BALANCE); + assert_ok!(XcmPallet::reserve_transfer_assets( + RuntimeOrigin::signed(user_account.clone()), + Box::new(Parachain(paid_para_id).into()), + Box::new(dest.into()), + Box::new((Here, SEND_AMOUNT).into()), + 0, + )); + + // XCM_FEES_NOT_WAIVED_USER_ACCOUNT spent amount + assert_eq!( + Balances::free_balance(user_account), + INITIAL_BALANCE - SEND_AMOUNT - xcm_router_fee_amount + ); + + // Destination account (parachain account) has amount + let para_acc: AccountId = ParaId::from(paid_para_id).into_account_truncating(); + assert_eq!(Balances::free_balance(para_acc), INITIAL_BALANCE + SEND_AMOUNT); + + // XcmFeesTargetAccount where should lend xcm_router_fee_amount + assert_eq!( + Balances::free_balance(XcmFeesTargetAccount::get()), + INITIAL_BALANCE + xcm_router_fee_amount + ); + + let dest_para: MultiLocation = Parachain(paid_para_id).into(); + assert_eq!( + sent_xcm(), + vec![( + dest_para, + Xcm(vec![ + ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), + ClearOrigin, + buy_execution((Parent, SEND_AMOUNT)), + DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, + ]), + )] + ); + let mut last_events = last_events(5).into_iter(); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) + ); + // balances events + last_events.next().unwrap(); + last_events.next().unwrap(); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::FeesPaid { + paying: dest, + fees: Para3000PaymentMultiAssets::get(), + }) + ); + assert!(matches!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Sent { .. }) + )); + }); +} + +fn set_up_foreign_asset( + reserve_para_id: u32, + inner_junction: Option, + initial_amount: u128, + is_sufficient: bool, +) -> (MultiLocation, AccountId, MultiLocation) { + let reserve_location = + RelayLocation::get().pushed_with_interior(Parachain(reserve_para_id)).unwrap(); + let reserve_sovereign_account = + SovereignAccountOf::convert_location(&reserve_location).unwrap(); + + let foreign_asset_id_multilocation = if let Some(junction) = inner_junction { + reserve_location.pushed_with_interior(junction).unwrap() + } else { + reserve_location + }; + + // create sufficient (to be used as fees as well) foreign asset (0 total issuance) + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + foreign_asset_id_multilocation, + BOB, + is_sufficient, + 1 + )); + // this asset should have been teleported/reserve-transferred in, but for this test we just + // mint it locally. 
+ assert_ok!(Assets::mint( + RuntimeOrigin::signed(BOB), + foreign_asset_id_multilocation, + ALICE, + initial_amount + )); + + (reserve_location, reserve_sovereign_account, foreign_asset_id_multilocation) +} + +// Helper function that provides correct `fee_index` after `sort()` done by +// `vec![MultiAsset, MultiAsset].into()`. +fn into_multiassets_checked( + fee_asset: MultiAsset, + transfer_asset: MultiAsset, +) -> (MultiAssets, usize, MultiAsset, MultiAsset) { + let assets: MultiAssets = vec![fee_asset.clone(), transfer_asset.clone()].into(); + let fee_index = if assets.get(0).unwrap().eq(&fee_asset) { 0 } else { 1 }; + (assets, fee_index, fee_asset, transfer_asset) +} + +/// Test `limited_reserve_transfer_assets` with local asset reserve and local fee reserve. +/// +/// Transferring native asset (local reserve) to some `OTHER_PARA_ID` (no teleport trust). +/// Using native asset for fees as well. +/// +/// ```nocompile +/// Here (source) OTHER_PARA_ID (destination) +/// | `assets` reserve +/// | `fees` reserve +/// | +/// | 1. execute `TransferReserveAsset(assets_and_fees_batched_together)` +/// | \--> sends `ReserveAssetDeposited(both), ClearOrigin, BuyExecution(fees), DepositAsset` +/// \------------------------------------------> +/// ``` +#[test] +fn limited_reserve_transfer_assets_with_local_asset_reserve_and_local_fee_reserve_works() { + let balances = vec![ + (ALICE, INITIAL_BALANCE), + (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), + ]; + + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + let weight_limit = WeightLimit::Limited(Weight::from_parts(5000, 5000)); + let expected_weight_limit = weight_limit.clone(); + let expected_beneficiary = beneficiary; + let dest: MultiLocation = Parachain(OTHER_PARA_ID).into(); + + new_test_ext_with_balances(balances).execute_with(|| { + let weight = BaseXcmWeight::get(); + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); + // call extrinsic + assert_ok!(XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new((Here, SEND_AMOUNT).into()), + 0, + weight_limit, + )); + // Alice spent amount + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - SEND_AMOUNT); + // Destination account (parachain account) has amount + let para_acc: AccountId = ParaId::from(OTHER_PARA_ID).into_account_truncating(); + assert_eq!(Balances::free_balance(para_acc), INITIAL_BALANCE + SEND_AMOUNT); + assert_eq!( + sent_xcm(), + vec![( + dest, + Xcm(vec![ + ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), + ClearOrigin, + buy_limited_execution((Parent, SEND_AMOUNT), expected_weight_limit), + DepositAsset { + assets: AllCounted(1).into(), + beneficiary: expected_beneficiary + }, + ]), + )] + ); + let mut last_events = last_events(3).into_iter(); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) + ); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::FeesPaid { + paying: expected_beneficiary, + fees: MultiAssets::new(), + }) + ); + assert!(matches!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Sent { .. }) + )); + }); +} + +/// Test `reserve_transfer_assets` with destination asset reserve and local fee reserve. 
+/// +/// Transferring foreign asset (`FOREIGN_ASSET_RESERVE_PARA_ID` reserve) to +/// `FOREIGN_ASSET_RESERVE_PARA_ID` (no teleport trust). +/// Using native asset (local reserve) for fees. +/// +/// ```nocompile +/// Here (source) FOREIGN_ASSET_RESERVE_PARA_ID (destination) +/// | `fees` reserve `assets` reserve +/// | +/// | 1. execute `TransferReserveAsset(fees)` +/// | \-> sends `ReserveAssetDeposited(fees), ClearOrigin, BuyExecution(fees), DepositAsset` +/// | 2. execute `InitiateReserveWithdraw(assets)` +/// | \--> sends `WithdrawAsset(assets), ClearOrigin, BuyExecution(fees), DepositAsset` +/// \------------------------------------------> +/// ``` +/// +/// Asserts that the sender's balance is decreased and the beneficiary's balance +/// is increased. Verifies the correct message is sent and event is emitted. +#[test] +fn reserve_transfer_assets_with_destination_asset_reserve_and_local_fee_reserve_works() { + let weight = BaseXcmWeight::get() * 3; + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create non-sufficient foreign asset BLA (0 total issuance) + let foreign_initial_amount = 142; + let (reserve_location, reserve_sovereign_account, foreign_asset_id_multilocation) = + set_up_foreign_asset( + FOREIGN_ASSET_RESERVE_PARA_ID, + Some(FOREIGN_ASSET_INNER_JUNCTION), + foreign_initial_amount, + false, + ); + + // transfer destination is reserve location (no teleport trust) + let dest = reserve_location; + + let (assets, fee_index, fee_asset, xfer_asset) = into_multiassets_checked( + // native asset for fee - local reserve + (MultiLocation::here(), FEE_AMOUNT).into(), + // foreign asset to transfer - destination reserve + (foreign_asset_id_multilocation, SEND_AMOUNT).into(), + ); + + // reanchor according to test-case + let context = UniversalLocation::get(); + let expected_fee = fee_asset.reanchored(&dest, context).unwrap(); + let expected_asset = xfer_asset.reanchored(&dest, context).unwrap(); + + // balances checks before + assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + assert_ok!(XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + )); + + let mut last_events = last_events(3).into_iter(); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) + ); + + // Alice spent (transferred) amount + assert_eq!( + Assets::balance(foreign_asset_id_multilocation, ALICE), + foreign_initial_amount - SEND_AMOUNT + ); + // Alice used native asset for fees + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - FEE_AMOUNT); + // Destination account (parachain account) added native reserve used as fee to balances + assert_eq!(Balances::free_balance(reserve_sovereign_account.clone()), FEE_AMOUNT); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, reserve_sovereign_account), 0); + // Verify total and active issuance of foreign BLA have decreased (burned on + // reserve-withdraw) + let expected_issuance = foreign_initial_amount - SEND_AMOUNT; + assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_issuance); + 
assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_issuance); + + // Verify sent XCM program + assert_eq!( + sent_xcm(), + vec![( + dest, + // `fees` are being sent through local-reserve transfer because fee reserve is + // local chain; `assets` are burned on source and withdrawn from SA here + Xcm(vec![ + ReserveAssetDeposited((Parent, FEE_AMOUNT).into()), + buy_limited_execution(expected_fee, Unlimited), + WithdrawAsset(expected_asset.into()), + ClearOrigin, + DepositAsset { assets: AllCounted(2).into(), beneficiary }, + ]) + )] + ); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::FeesPaid { + paying: beneficiary, + fees: MultiAssets::new(), + }) + ); + assert!(matches!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Sent { .. }) + )); + }); +} + +/// Test `reserve_transfer_assets` with remote asset reserve and local fee reserve. +/// +/// Transferring foreign asset (reserve on `FOREIGN_ASSET_RESERVE_PARA_ID`) to `OTHER_PARA_ID`. +/// Using native (local reserve) as fee should be disallowed. +#[test] +fn reserve_transfer_assets_with_remote_asset_reserve_and_local_fee_reserve_disallowed() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create non-sufficient foreign asset BLA (0 total issuance) + let foreign_initial_amount = 142; + let (_, _, foreign_asset_id_multilocation) = set_up_foreign_asset( + FOREIGN_ASSET_RESERVE_PARA_ID, + Some(FOREIGN_ASSET_INNER_JUNCTION), + foreign_initial_amount, + false, + ); + + // transfer destination is OTHER_PARA_ID (foreign asset needs to go through its reserve + // chain) + let dest = RelayLocation::get().pushed_with_interior(Parachain(OTHER_PARA_ID)).unwrap(); + + let (assets, fee_index, _, _) = into_multiassets_checked( + // native asset for fee - local reserve + (MultiLocation::here(), FEE_AMOUNT).into(), + // foreign asset to transfer - remote reserve + (foreign_asset_id_multilocation, SEND_AMOUNT).into(), + ); + + // balances checks before + assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // try the transfer + let result = XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + ); + assert_eq!( + result, + Err(DispatchError::Module(ModuleError { + index: 4, + error: [22, 0, 0, 0], + message: Some("InvalidAssetUnsupportedReserve") + })) + ); + + // Alice transferred nothing + assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); + // Alice spent native asset for fees + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + // Verify total and active issuance of foreign BLA asset have decreased (burned on + // reserve-withdraw) + let expected_issuance = foreign_initial_amount; + assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_issuance); + assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_issuance); + }); +} + +/// Test `reserve_transfer_assets` with local asset reserve and destination fee reserve. +/// +/// Transferring native asset (local reserve) to `USDC_RESERVE_PARA_ID` (no teleport trust). 
Using +/// foreign asset (`USDC_RESERVE_PARA_ID` reserve) for fees. +/// +/// ```nocompile +/// Here (source) USDC_RESERVE_PARA_ID (destination) +/// | `assets` reserve `fees` reserve +/// | +/// | 1. execute `InitiateReserveWithdraw(fees)` +/// | \--> sends `WithdrawAsset(fees), ClearOrigin, BuyExecution(fees), DepositAsset` +/// | 2. execute `TransferReserveAsset(assets)` +/// | \-> sends `ReserveAssetDeposited(assets), ClearOrigin, BuyExecution(fees), DepositAsset` +/// \------------------------------------------> +/// ``` +#[test] +fn reserve_transfer_assets_with_local_asset_reserve_and_destination_fee_reserve_works() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDC (0 total issuance) + let usdc_initial_local_amount = 142; + let (usdc_reserve_location, usdc_chain_sovereign_account, usdc_id_multilocation) = + set_up_foreign_asset( + USDC_RESERVE_PARA_ID, + Some(USDC_INNER_JUNCTION), + usdc_initial_local_amount, + true, + ); + + // native assets transfer to fee reserve location (no teleport trust) + let dest = usdc_reserve_location; + + let (assets, fee_index, fee_asset, xfer_asset) = into_multiassets_checked( + // usdc for fees (is sufficient on local chain too) - destination reserve + (usdc_id_multilocation, FEE_AMOUNT).into(), + // native asset to transfer (not used for fees) - local reserve + (MultiLocation::here(), SEND_AMOUNT).into(), + ); + + // reanchor according to test-case + let context = UniversalLocation::get(); + let expected_fee = fee_asset.reanchored(&dest, context).unwrap(); + let expected_asset = xfer_asset.reanchored(&dest, context).unwrap(); + + // balances checks before + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + assert_ok!(XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + )); + let weight = BaseXcmWeight::get() * 3; + let mut last_events = last_events(3).into_iter(); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) + ); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::FeesPaid { + paying: beneficiary, + fees: MultiAssets::new(), + }) + ); + assert!(matches!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Sent { .. 
}) + )); + + // Alice spent (fees) amount + assert_eq!( + Assets::balance(usdc_id_multilocation, ALICE), + usdc_initial_local_amount - FEE_AMOUNT + ); + // Alice used native asset for transfer + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - SEND_AMOUNT); + // Sovereign account of dest parachain holds `SEND_AMOUNT` native asset in local reserve + assert_eq!(Balances::free_balance(usdc_chain_sovereign_account.clone()), SEND_AMOUNT); + assert_eq!(Assets::balance(usdc_id_multilocation, usdc_chain_sovereign_account), 0); + // Verify total and active issuance of USDC have decreased (burned on reserve-withdraw) + let expected_issuance = usdc_initial_local_amount - FEE_AMOUNT; + assert_eq!(Assets::total_issuance(usdc_id_multilocation), expected_issuance); + assert_eq!(Assets::active_issuance(usdc_id_multilocation), expected_issuance); + + // Verify sent XCM program + assert_eq!( + sent_xcm(), + vec![( + dest, + Xcm(vec![ + // fees are being sent through destination-reserve transfer because fee reserve + // is destination chain + WithdrawAsset(expected_fee.clone().into()), + buy_limited_execution(expected_fee, Unlimited), + // transfer is through local-reserve transfer because `assets` (native asset) + // have local reserve + ReserveAssetDeposited(expected_asset.into()), + ClearOrigin, + DepositAsset { assets: AllCounted(2).into(), beneficiary }, + ]) + )] + ); + }); +} + +/// Test `reserve_transfer_assets` with destination asset reserve and destination fee reserve. +/// +/// ```nocompile +/// Here (source) FOREIGN_ASSET_RESERVE_PARA_ID (destination) +/// | `fees` reserve +/// | `assets` reserve +/// | +/// | 1. execute `InitiateReserveWithdraw(assets_and_fees_batched_together)` +/// | \--> sends `WithdrawAsset(batch), ClearOrigin, BuyExecution(fees), DepositAsset` +/// \------------------------------------------> +/// ``` +#[test] +fn reserve_transfer_assets_with_destination_asset_reserve_and_destination_fee_reserve_works() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // we'll send just this foreign asset back to its reserve location and use it for fees as + // well + let foreign_initial_amount = 142; + let (reserve_location, reserve_sovereign_account, foreign_asset_id_multilocation) = + set_up_foreign_asset( + FOREIGN_ASSET_RESERVE_PARA_ID, + Some(FOREIGN_ASSET_INNER_JUNCTION), + foreign_initial_amount, + true, + ); + + // transfer destination is reserve location + let dest = reserve_location; + let assets: MultiAssets = vec![(foreign_asset_id_multilocation, SEND_AMOUNT).into()].into(); + let fee_index = 0; + + // reanchor according to test-case + let mut expected_assets = assets.clone(); + expected_assets.reanchor(&dest, UniversalLocation::get()).unwrap(); + + // balances checks before + assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + assert_ok!(XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index, + Unlimited, + )); + + let weight = BaseXcmWeight::get() * 2; + let mut last_events = last_events(3).into_iter(); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) + ); + assert_eq!( + 
last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::FeesPaid { + paying: beneficiary, + fees: MultiAssets::new(), + }) + ); + assert!(matches!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Sent { .. }) + )); + + // Alice spent (transferred) amount + assert_eq!( + Assets::balance(foreign_asset_id_multilocation, ALICE), + foreign_initial_amount - SEND_AMOUNT + ); + // Alice's native asset balance is untouched + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + // Reserve sovereign account has same balances + assert_eq!(Balances::free_balance(reserve_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, reserve_sovereign_account), 0); + // Verify total and active issuance of foreign BLA have decreased (burned on + // reserve-withdraw) + let expected_issuance = foreign_initial_amount - SEND_AMOUNT; + assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_issuance); + assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_issuance); + + // Verify sent XCM program + assert_eq!( + sent_xcm(), + vec![( + Parachain(FOREIGN_ASSET_RESERVE_PARA_ID).into(), + Xcm(vec![ + WithdrawAsset(expected_assets.clone()), + ClearOrigin, + buy_limited_execution(expected_assets.get(0).unwrap().clone(), Unlimited), + DepositAsset { assets: AllCounted(1).into(), beneficiary }, + ]), + )] + ); + }); +} + +/// Test `reserve_transfer_assets` with remote asset reserve and destination fee reserve is +/// disallowed. +/// +/// Transferring foreign asset (reserve on `FOREIGN_ASSET_RESERVE_PARA_ID`) to +/// `USDC_RESERVE_PARA_ID`. Using USDC (destination reserve) as fee. +#[test] +fn reserve_transfer_assets_with_remote_asset_reserve_and_destination_fee_reserve_disallowed() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDC (0 total issuance) + let usdc_initial_local_amount = 42; + let (usdc_chain, _, usdc_id_multilocation) = set_up_foreign_asset( + USDC_RESERVE_PARA_ID, + Some(USDC_INNER_JUNCTION), + usdc_initial_local_amount, + true, + ); + + // create non-sufficient foreign asset BLA (0 total issuance) + let foreign_initial_amount = 142; + let (_, _, foreign_asset_id_multilocation) = set_up_foreign_asset( + FOREIGN_ASSET_RESERVE_PARA_ID, + Some(FOREIGN_ASSET_INNER_JUNCTION), + foreign_initial_amount, + false, + ); + + // transfer destination is USDC chain (foreign asset BLA needs to go through its separate + // reserve chain) + let dest = usdc_chain; + + let (assets, fee_index, _, _) = into_multiassets_checked( + // USDC for fees (is sufficient on local chain too) - destination reserve + (usdc_id_multilocation, FEE_AMOUNT).into(), + // foreign asset to transfer (not used for fees) - remote reserve + (foreign_asset_id_multilocation, SEND_AMOUNT).into(), + ); + + // balances checks before + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + let result = XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + ); + assert_eq!( + result, + 
Err(DispatchError::Module(ModuleError { + index: 4, + error: [22, 0, 0, 0], + message: Some("InvalidAssetUnsupportedReserve") + })) + ); + + // Alice native asset untouched + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); + let expected_usdc_issuance = usdc_initial_local_amount; + assert_eq!(Assets::total_issuance(usdc_id_multilocation), expected_usdc_issuance); + assert_eq!(Assets::active_issuance(usdc_id_multilocation), expected_usdc_issuance); + let expected_bla_issuance = foreign_initial_amount; + assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_bla_issuance); + assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_bla_issuance); + }); +} + +/// Test `reserve_transfer_assets` with local asset reserve and remote fee reserve is disallowed. +/// +/// Transferring native asset (local reserve) to `OTHER_PARA_ID` (no teleport trust). Using foreign +/// asset (`USDC_RESERVE_PARA_ID` remote reserve) for fees. +#[test] +fn reserve_transfer_assets_with_local_asset_reserve_and_remote_fee_reserve_disallowed() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDC (0 total issuance) + let usdc_initial_local_amount = 142; + let (_, usdc_chain_sovereign_account, usdc_id_multilocation) = set_up_foreign_asset( + USDC_RESERVE_PARA_ID, + Some(USDC_INNER_JUNCTION), + usdc_initial_local_amount, + true, + ); + + // transfer destination is some other parachain != fee reserve location (no teleport trust) + let dest = RelayLocation::get().pushed_with_interior(Parachain(OTHER_PARA_ID)).unwrap(); + let dest_sovereign_account = SovereignAccountOf::convert_location(&dest).unwrap(); + + let (assets, fee_index, _, _) = into_multiassets_checked( + // USDC for fees (is sufficient on local chain too) - remote reserve + (usdc_id_multilocation, FEE_AMOUNT).into(), + // native asset to transfer (not used for fees) - local reserve + (MultiLocation::here(), SEND_AMOUNT).into(), + ); + + // balances checks before + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + let result = XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + ); + assert_eq!( + result, + Err(DispatchError::Module(ModuleError { + index: 4, + error: [22, 0, 0, 0], + message: Some("InvalidAssetUnsupportedReserve") + })) + ); + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + // Sovereign account of reserve parachain is unchanged + assert_eq!(Balances::free_balance(usdc_chain_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(usdc_id_multilocation, usdc_chain_sovereign_account), 0); + assert_eq!(Balances::free_balance(dest_sovereign_account), 0); + let expected_usdc_issuance = usdc_initial_local_amount; + assert_eq!(Assets::total_issuance(usdc_id_multilocation), expected_usdc_issuance); + assert_eq!(Assets::active_issuance(usdc_id_multilocation), 
expected_usdc_issuance); + }); +} + +/// Test `reserve_transfer_assets` with destination asset reserve and remote fee reserve is +/// disallowed. +/// +/// Transferring native asset (local reserve) to `OTHER_PARA_ID` (no teleport trust). Using foreign +/// asset (`USDC_RESERVE_PARA_ID` remote reserve) for fees. +#[test] +fn reserve_transfer_assets_with_destination_asset_reserve_and_remote_fee_reserve_disallowed() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDC (0 total issuance) + let usdc_initial_local_amount = 42; + let (_, usdc_chain_sovereign_account, usdc_id_multilocation) = set_up_foreign_asset( + USDC_RESERVE_PARA_ID, + Some(USDC_INNER_JUNCTION), + usdc_initial_local_amount, + true, + ); + + // create non-sufficient foreign asset BLA (0 total issuance) + let foreign_initial_amount = 142; + let (reserve_location, foreign_sovereign_account, foreign_asset_id_multilocation) = + set_up_foreign_asset( + FOREIGN_ASSET_RESERVE_PARA_ID, + Some(FOREIGN_ASSET_INNER_JUNCTION), + foreign_initial_amount, + false, + ); + + // transfer destination is asset reserve location + let dest = reserve_location; + let dest_sovereign_account = foreign_sovereign_account; + + let (assets, fee_index, _, _) = into_multiassets_checked( + // USDC for fees (is sufficient on local chain too) - remote reserve + (usdc_id_multilocation, FEE_AMOUNT).into(), + // foreign asset to transfer (not used for fees) - destination reserve + (foreign_asset_id_multilocation, SEND_AMOUNT).into(), + ); + + // balances checks before + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + let result = XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + ); + assert_eq!( + result, + Err(DispatchError::Module(ModuleError { + index: 4, + error: [22, 0, 0, 0], + message: Some("InvalidAssetUnsupportedReserve") + })) + ); + // Alice native asset untouched + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); + assert_eq!(Balances::free_balance(usdc_chain_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(usdc_id_multilocation, usdc_chain_sovereign_account), 0); + assert_eq!(Balances::free_balance(dest_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, dest_sovereign_account), 0); + let expected_usdc_issuance = usdc_initial_local_amount; + assert_eq!(Assets::total_issuance(usdc_id_multilocation), expected_usdc_issuance); + assert_eq!(Assets::active_issuance(usdc_id_multilocation), expected_usdc_issuance); + let expected_bla_issuance = foreign_initial_amount; + assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_bla_issuance); + assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_bla_issuance); + }); +} + +/// Test `reserve_transfer_assets` with remote asset reserve and (same) remote fee reserve. +/// +/// Transferring native asset (local reserve) to `OTHER_PARA_ID` (no teleport trust). 
Using foreign +/// asset (`USDC_RESERVE_PARA_ID` remote reserve) for fees. +/// +/// ```nocompile +/// | chain `A` | chain `C` | chain `B` +/// | Here (source) | USDC_RESERVE_PARA_ID | OTHER_PARA_ID (destination) +/// | | `fees` reserve | +/// | | `assets` reserve | +/// | +/// | 1. `A` executes `InitiateReserveWithdraw(both)` dest `C` +/// | -----------------> `C` executes `DepositReserveAsset(both)` dest `B` +/// | --------------------------> `DepositAsset(both)` +/// ``` +#[test] +fn reserve_transfer_assets_with_remote_asset_reserve_and_remote_fee_reserve_works() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDC (0 total issuance) + let usdc_initial_local_amount = 142; + let (usdc_chain, usdc_chain_sovereign_account, usdc_id_multilocation) = + set_up_foreign_asset( + USDC_RESERVE_PARA_ID, + Some(USDC_INNER_JUNCTION), + usdc_initial_local_amount, + true, + ); + + // transfer destination is some other parachain + let dest = RelayLocation::get().pushed_with_interior(Parachain(OTHER_PARA_ID)).unwrap(); + + let assets: MultiAssets = vec![(usdc_id_multilocation, SEND_AMOUNT).into()].into(); + let fee_index = 0; + + // reanchor according to test-case + let context = UniversalLocation::get(); + let expected_dest_on_reserve = dest.reanchored(&usdc_chain, context).unwrap(); + let fees = assets.get(fee_index).unwrap().clone(); + let (fees_half_1, fees_half_2) = XcmPallet::halve_fees(fees).unwrap(); + let mut expected_assets_on_reserve = assets.clone(); + expected_assets_on_reserve.reanchor(&usdc_chain, context).unwrap(); + let expected_fee_on_reserve = fees_half_1.reanchored(&usdc_chain, context).unwrap(); + let expected_fee_on_dest = fees_half_2.reanchored(&dest, context).unwrap(); + + // balances checks before + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + assert_ok!(XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + )); + assert!(matches!( + last_event(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(_) }) + )); + + // Alice spent (transferred) amount + assert_eq!( + Assets::balance(usdc_id_multilocation, ALICE), + usdc_initial_local_amount - SEND_AMOUNT + ); + // Alice's native asset balance is untouched + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + // Destination account (parachain account) has expected (same) balances + assert_eq!(Balances::free_balance(usdc_chain_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(usdc_id_multilocation, usdc_chain_sovereign_account), 0); + // Verify total and active issuance of USDC have decreased (burned on reserve-withdraw) + let expected_usdc_issuance = usdc_initial_local_amount - SEND_AMOUNT; + assert_eq!(Assets::total_issuance(usdc_id_multilocation), expected_usdc_issuance); + assert_eq!(Assets::active_issuance(usdc_id_multilocation), expected_usdc_issuance); + + // Verify sent XCM program + assert_eq!( + sent_xcm(), + vec![( + // first message sent to reserve chain + usdc_chain, + Xcm(vec![ + WithdrawAsset(expected_assets_on_reserve), + ClearOrigin, + BuyExecution { fees: expected_fee_on_reserve, weight_limit: Unlimited }, + 
DepositReserveAsset { + assets: Wild(AllCounted(1)), + // final destination is `dest` as seen by `reserve` + dest: expected_dest_on_reserve, + // message sent onward to `dest` + xcm: Xcm(vec![ + buy_limited_execution(expected_fee_on_dest, Unlimited), + DepositAsset { assets: AllCounted(1).into(), beneficiary } + ]) + } + ]) + )], + ); + }); +} + +/// Test `reserve_transfer_assets` with local asset reserve and teleported fee. +/// +/// Transferring native asset (local reserve) to `USDT_PARA_ID`. Using teleport-trusted USDT for +/// fees. +/// +/// ```nocompile +/// Here (source) USDT_PARA_ID (destination) +/// | `assets` reserve `fees` teleport-trust +/// | +/// | 1. execute `InitiateTeleport(fees)` +/// | \--> sends `ReceiveTeleportedAsset(fees), .., DepositAsset(fees)` +/// | 2. execute `TransferReserveAsset(assets)` +/// | \-> sends `ReserveAssetDeposited(assets), ClearOrigin, BuyExecution(fees), DepositAsset` +/// \------------------------------------------> +/// ``` +#[test] +fn reserve_transfer_assets_with_local_asset_reserve_and_teleported_fee_works() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDT (0 total issuance) + let usdt_initial_local_amount = 42; + let (usdt_chain, usdt_chain_sovereign_account, usdt_id_multilocation) = + set_up_foreign_asset(USDT_PARA_ID, None, usdt_initial_local_amount, true); + + // native assets transfer destination is USDT chain (teleport trust only for USDT) + let dest = usdt_chain; + + let (assets, fee_index, fee_asset, xfer_asset) = into_multiassets_checked( + // USDT for fees (is sufficient on local chain too) - teleported + (usdt_id_multilocation, FEE_AMOUNT).into(), + // native asset to transfer (not used for fees) - local reserve + (MultiLocation::here(), SEND_AMOUNT).into(), + ); + + // reanchor according to test-case + let context = UniversalLocation::get(); + let expected_fee = fee_asset.reanchored(&dest, context).unwrap(); + let expected_asset = xfer_asset.reanchored(&dest, context).unwrap(); + + // balances checks before + assert_eq!(Assets::balance(usdt_id_multilocation, ALICE), usdt_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + assert_ok!(XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + )); + let weight = BaseXcmWeight::get() * 3; + let mut last_events = last_events(3).into_iter(); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) + ); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::FeesPaid { + paying: beneficiary, + fees: MultiAssets::new(), + }) + ); + assert!(matches!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Sent { .. 
}) + )); + // Alice spent (fees) amount + assert_eq!( + Assets::balance(usdt_id_multilocation, ALICE), + usdt_initial_local_amount - FEE_AMOUNT + ); + // Alice used native asset for transfer + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - SEND_AMOUNT); + // Sovereign account of dest parachain holds `SEND_AMOUNT` native asset in local reserve + assert_eq!(Balances::free_balance(usdt_chain_sovereign_account.clone()), SEND_AMOUNT); + assert_eq!(Assets::balance(usdt_id_multilocation, usdt_chain_sovereign_account), 0); + // Verify total and active issuance have decreased (teleported) + let expected_usdt_issuance = usdt_initial_local_amount - FEE_AMOUNT; + assert_eq!(Assets::total_issuance(usdt_id_multilocation), expected_usdt_issuance); + assert_eq!(Assets::active_issuance(usdt_id_multilocation), expected_usdt_issuance); + + // Verify sent XCM program + assert_eq!( + sent_xcm(), + vec![( + dest, + Xcm(vec![ + // fees are teleported to destination chain + ReceiveTeleportedAsset(expected_fee.clone().into()), + buy_limited_execution(expected_fee, Unlimited), + // transfer is through local-reserve transfer because `assets` (native + // asset) have local reserve + ReserveAssetDeposited(expected_asset.into()), + ClearOrigin, + DepositAsset { assets: AllCounted(2).into(), beneficiary }, + ]) + )] + ); + }); +} + +/// Test `reserve_transfer_assets` with destination asset reserve and teleported fee. +/// +/// Transferring foreign asset (destination reserve) to `FOREIGN_ASSET_RESERVE_PARA_ID`. Using +/// teleport-trusted USDT for fees. +/// +/// ```nocompile +/// Here (source) FOREIGN_ASSET_RESERVE_PARA_ID (destination) +/// | `fees` (USDT) teleport-trust +/// | `assets` reserve +/// | +/// | 1. execute `InitiateTeleport(fees)` +/// | \--> sends `ReceiveTeleportedAsset(fees), .., DepositAsset(fees)` +/// | 2. 
execute `InitiateReserveWithdraw(assets)` +/// | \--> sends `WithdrawAsset(asset), ClearOrigin, BuyExecution(fees), DepositAsset` +/// \------------------------------------------> +/// ``` +#[test] +fn reserve_transfer_assets_with_destination_asset_reserve_and_teleported_fee_works() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDT (0 total issuance) + let usdt_initial_local_amount = 42; + let (_, usdt_chain_sovereign_account, usdt_id_multilocation) = + set_up_foreign_asset(USDT_PARA_ID, None, usdt_initial_local_amount, true); + + // create non-sufficient foreign asset BLA (0 total issuance) + let foreign_initial_amount = 142; + let (reserve_location, foreign_sovereign_account, foreign_asset_id_multilocation) = + set_up_foreign_asset( + FOREIGN_ASSET_RESERVE_PARA_ID, + Some(FOREIGN_ASSET_INNER_JUNCTION), + foreign_initial_amount, + false, + ); + + // transfer destination is asset reserve location + let dest = reserve_location; + let dest_sovereign_account = foreign_sovereign_account; + + let (assets, fee_index, fee_asset, xfer_asset) = into_multiassets_checked( + // USDT for fees (is sufficient on local chain too) - teleported + (usdt_id_multilocation, FEE_AMOUNT).into(), + // foreign asset to transfer (not used for fees) - destination reserve + (foreign_asset_id_multilocation, SEND_AMOUNT).into(), + ); + + // reanchor according to test-case + let context = UniversalLocation::get(); + let expected_fee = fee_asset.reanchored(&dest, context).unwrap(); + let expected_asset = xfer_asset.reanchored(&dest, context).unwrap(); + + // balances checks before + assert_eq!(Assets::balance(usdt_id_multilocation, ALICE), usdt_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + assert_ok!(XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + )); + let weight = BaseXcmWeight::get() * 4; + let mut last_events = last_events(3).into_iter(); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) + ); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::FeesPaid { + paying: beneficiary, + fees: MultiAssets::new(), + }) + ); + assert!(matches!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Sent { .. 
}) + )); + // Alice native asset untouched + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + // Alice spent USDT for fees + assert_eq!( + Assets::balance(usdt_id_multilocation, ALICE), + usdt_initial_local_amount - FEE_AMOUNT + ); + // Alice transferred BLA + assert_eq!( + Assets::balance(foreign_asset_id_multilocation, ALICE), + foreign_initial_amount - SEND_AMOUNT + ); + // Verify balances of USDT reserve parachain + assert_eq!(Balances::free_balance(usdt_chain_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(usdt_id_multilocation, usdt_chain_sovereign_account), 0); + // Verify balances of transferred-asset reserve parachain + assert_eq!(Balances::free_balance(dest_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, dest_sovereign_account), 0); + // Verify total and active issuance of USDT have decreased (teleported) + let expected_usdt_issuance = usdt_initial_local_amount - FEE_AMOUNT; + assert_eq!(Assets::total_issuance(usdt_id_multilocation), expected_usdt_issuance); + assert_eq!(Assets::active_issuance(usdt_id_multilocation), expected_usdt_issuance); + // Verify total and active issuance of foreign BLA asset have decreased (burned on + // reserve-withdraw) + let expected_bla_issuance = foreign_initial_amount - SEND_AMOUNT; + assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_bla_issuance); + assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_bla_issuance); + + // Verify sent XCM program + assert_eq!( + sent_xcm(), + vec![( + dest, + Xcm(vec![ + // fees are teleported to destination chain + ReceiveTeleportedAsset(expected_fee.clone().into()), + buy_limited_execution(expected_fee, Unlimited), + // assets are withdrawn from origin's local SA + WithdrawAsset(expected_asset.into()), + ClearOrigin, + DepositAsset { assets: AllCounted(2).into(), beneficiary }, + ]) + )] + ); + }); +} + +/// Test `reserve_transfer_assets` with remote asset reserve and teleported fee is disallowed. +/// +/// Transferring foreign asset (reserve on `FOREIGN_ASSET_RESERVE_PARA_ID`) to `USDT_PARA_ID`. +/// Using teleport-trusted USDT for fees. 
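+/// Because the transferred asset's reserve is a third, remote chain while the fee asset is
+/// teleported, this combination is not supported by a single transfer call here; the dispatch
+/// below is expected to fail with (pallet index 4 being `XcmPallet` in this mock runtime):
+/// ```nocompile
+/// Err(DispatchError::Module(ModuleError {
+///     index: 4, error: [22, 0, 0, 0], message: Some("InvalidAssetUnsupportedReserve")
+/// }))
+/// ```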
+#[test] +fn reserve_transfer_assets_with_remote_asset_reserve_and_teleported_fee_disallowed() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDT (0 total issuance) + let usdt_initial_local_amount = 42; + let (usdt_chain, usdt_chain_sovereign_account, usdt_id_multilocation) = + set_up_foreign_asset(USDT_PARA_ID, None, usdt_initial_local_amount, true); + + // create non-sufficient foreign asset BLA (0 total issuance) + let foreign_initial_amount = 142; + let (_, reserve_sovereign_account, foreign_asset_id_multilocation) = set_up_foreign_asset( + FOREIGN_ASSET_RESERVE_PARA_ID, + Some(FOREIGN_ASSET_INNER_JUNCTION), + foreign_initial_amount, + false, + ); + + // transfer destination is USDT chain (foreign asset needs to go through its reserve chain) + let dest = usdt_chain; + + let (assets, fee_index, _, _) = into_multiassets_checked( + // USDT for fees (is sufficient on local chain too) - teleported + (usdt_id_multilocation, FEE_AMOUNT).into(), + // foreign asset to transfer (not used for fees) - remote reserve + (foreign_asset_id_multilocation, SEND_AMOUNT).into(), + ); + + // balances checks before + assert_eq!(Assets::balance(usdt_id_multilocation, ALICE), usdt_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + let result = XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + ); + assert_eq!( + result, + Err(DispatchError::Module(ModuleError { + index: 4, + error: [22, 0, 0, 0], + message: Some("InvalidAssetUnsupportedReserve") + })) + ); + // Alice native asset untouched + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + assert_eq!(Assets::balance(usdt_id_multilocation, ALICE), usdt_initial_local_amount); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); + assert_eq!(Balances::free_balance(usdt_chain_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(usdt_id_multilocation, usdt_chain_sovereign_account), 0); + assert_eq!(Balances::free_balance(reserve_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, reserve_sovereign_account), 0); + let expected_usdt_issuance = usdt_initial_local_amount; + assert_eq!(Assets::total_issuance(usdt_id_multilocation), expected_usdt_issuance); + assert_eq!(Assets::active_issuance(usdt_id_multilocation), expected_usdt_issuance); + let expected_bla_issuance = foreign_initial_amount; + assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_bla_issuance); + assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_bla_issuance); + }); +} + +/// Test `reserve_transfer_assets` single asset which is teleportable - should fail. +/// +/// Attempting to reserve-transfer teleport-trusted USDT to `USDT_PARA_ID` should fail. 
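+/// The pallet is expected to reject this before sending any XCM, since the asset is
+/// teleport-trusted for this destination and so is not accepted for a reserve transfer;
+/// sketch of the asserted error:
+/// ```nocompile
+/// Err(DispatchError::Module(ModuleError {
+///     index: 4, error: [2, 0, 0, 0], message: Some("Filtered")
+/// }))
+/// ```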
+#[test] +fn reserve_transfer_assets_with_teleportable_asset_fails() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDT (0 total issuance) + let usdt_initial_local_amount = 42; + let (usdt_chain, usdt_chain_sovereign_account, usdt_id_multilocation) = + set_up_foreign_asset(USDT_PARA_ID, None, usdt_initial_local_amount, true); + + // transfer destination is USDT chain (foreign asset needs to go through its reserve chain) + let dest = usdt_chain; + let assets: MultiAssets = vec![(usdt_id_multilocation, FEE_AMOUNT).into()].into(); + let fee_index = 0; + + // balances checks before + assert_eq!(Assets::balance(usdt_id_multilocation, ALICE), usdt_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + let res = XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + ); + assert_eq!( + res, + Err(DispatchError::Module(ModuleError { + index: 4, + error: [2, 0, 0, 0], + message: Some("Filtered") + })) + ); + // Alice native asset is still same + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + // Alice USDT balance is still same + assert_eq!(Assets::balance(usdt_id_multilocation, ALICE), usdt_initial_local_amount); + // No USDT moved to sovereign account of reserve parachain + assert_eq!(Assets::balance(usdt_id_multilocation, usdt_chain_sovereign_account), 0); + // Verify total and active issuance of USDT are still the same + assert_eq!(Assets::total_issuance(usdt_id_multilocation), usdt_initial_local_amount); + assert_eq!(Assets::active_issuance(usdt_id_multilocation), usdt_initial_local_amount); + }); +} diff --git a/polkadot/xcm/pallet-xcm/src/tests.rs b/polkadot/xcm/pallet-xcm/src/tests/mod.rs similarity index 68% rename from polkadot/xcm/pallet-xcm/src/tests.rs rename to polkadot/xcm/pallet-xcm/src/tests/mod.rs index d267eece2c0..72814e507f2 100644 --- a/polkadot/xcm/pallet-xcm/src/tests.rs +++ b/polkadot/xcm/pallet-xcm/src/tests/mod.rs @@ -14,6 +14,10 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
+#![cfg(test)] + +mod assets_transfer; + use crate::{ mock::*, AssetTraps, CurrentMigration, Error, LatestVersionedMultiLocation, Queries, QueryStatus, VersionDiscoveryQueue, VersionMigrationStage, VersionNotifiers, @@ -35,15 +39,15 @@ use xcm_executor::{ const ALICE: AccountId = AccountId::new([0u8; 32]); const BOB: AccountId = AccountId::new([1u8; 32]); -const PARA_ID: u32 = 2000; const INITIAL_BALANCE: u128 = 100; const SEND_AMOUNT: u128 = 10; +const FEE_AMOUNT: u128 = 2; #[test] fn report_outcome_notify_works() { let balances = vec![ (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), + (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), ]; let sender: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); let mut message = @@ -56,7 +60,7 @@ fn report_outcome_notify_works() { new_test_ext_with_balances(balances).execute_with(|| { XcmPallet::report_outcome_notify( &mut message, - Parachain(PARA_ID).into_location(), + Parachain(OTHER_PARA_ID).into_location(), notify, 100, ) @@ -74,8 +78,8 @@ fn report_outcome_notify_works() { ); let querier: MultiLocation = Here.into(); let status = QueryStatus::Pending { - responder: MultiLocation::from(Parachain(PARA_ID)).into(), - maybe_notify: Some((4, 2)), + responder: MultiLocation::from(Parachain(OTHER_PARA_ID)).into(), + maybe_notify: Some((5, 2)), timeout: 100, maybe_match_querier: Some(querier.into()), }; @@ -89,7 +93,7 @@ fn report_outcome_notify_works() { }]); let hash = fake_message_hash(&message); let r = XcmExecutor::::execute_xcm( - Parachain(PARA_ID), + Parachain(OTHER_PARA_ID), message, hash, Weight::from_parts(1_000_000_000, 1_000_000_000), @@ -99,13 +103,13 @@ fn report_outcome_notify_works() { last_events(2), vec![ RuntimeEvent::TestNotifier(pallet_test_notifier::Event::ResponseReceived( - Parachain(PARA_ID).into(), + Parachain(OTHER_PARA_ID).into(), 0, Response::ExecutionResult(None), )), RuntimeEvent::XcmPallet(crate::Event::Notified { query_id: 0, - pallet_index: 4, + pallet_index: 5, call_index: 2 }), ] @@ -118,13 +122,14 @@ fn report_outcome_notify_works() { fn report_outcome_works() { let balances = vec![ (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), + (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), ]; let sender: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); let mut message = Xcm(vec![TransferAsset { assets: (Here, SEND_AMOUNT).into(), beneficiary: sender }]); new_test_ext_with_balances(balances).execute_with(|| { - XcmPallet::report_outcome(&mut message, Parachain(PARA_ID).into_location(), 100).unwrap(); + XcmPallet::report_outcome(&mut message, Parachain(OTHER_PARA_ID).into_location(), 100) + .unwrap(); assert_eq!( message, Xcm(vec![ @@ -138,7 +143,7 @@ fn report_outcome_works() { ); let querier: MultiLocation = Here.into(); let status = QueryStatus::Pending { - responder: MultiLocation::from(Parachain(PARA_ID)).into(), + responder: MultiLocation::from(Parachain(OTHER_PARA_ID)).into(), maybe_notify: None, timeout: 100, maybe_match_querier: Some(querier.into()), @@ -153,7 +158,7 @@ fn report_outcome_works() { }]); let hash = fake_message_hash(&message); let r = XcmExecutor::::execute_xcm( - Parachain(PARA_ID), + Parachain(OTHER_PARA_ID), message, hash, Weight::from_parts(1_000_000_000, 1_000_000_000), @@ -177,7 +182,7 @@ fn report_outcome_works() { fn custom_querier_works() { let balances = vec![ (ALICE, INITIAL_BALANCE), - 
(ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), + (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), ]; new_test_ext_with_balances(balances).execute_with(|| { let querier: MultiLocation = @@ -281,7 +286,7 @@ fn custom_querier_works() { fn send_works() { let balances = vec![ (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), + (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), ]; new_test_ext_with_balances(balances).execute_with(|| { let sender: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); @@ -325,7 +330,7 @@ fn send_works() { fn send_fails_when_xcm_router_blocks() { let balances = vec![ (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), + (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), ]; new_test_ext_with_balances(balances).execute_with(|| { let sender: MultiLocation = @@ -346,344 +351,6 @@ fn send_fails_when_xcm_router_blocks() { }); } -/// Test `teleport_assets` -/// -/// Asserts that the sender's balance is decreased as a result of execution of -/// local effects. -#[test] -fn teleport_assets_works() { - let balances = vec![ - (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), - ]; - new_test_ext_with_balances(balances).execute_with(|| { - let weight = BaseXcmWeight::get() * 3; - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); - let dest: MultiLocation = AccountId32 { network: None, id: BOB.into() }.into(); - assert_ok!(XcmPallet::teleport_assets( - RuntimeOrigin::signed(ALICE), - Box::new(RelayLocation::get().into()), - Box::new(dest.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - )); - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); - assert_eq!( - sent_xcm(), - vec![( - RelayLocation::get().into(), - Xcm(vec![ - ReceiveTeleportedAsset((Here, SEND_AMOUNT).into()), - ClearOrigin, - buy_execution((Here, SEND_AMOUNT)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]), - )] - ); - let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1); - let _check_v2_ok: xcm::v2::Xcm<()> = versioned_sent.try_into().unwrap(); - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - }); -} - -/// Test `limited_teleport_assets` -/// -/// Asserts that the sender's balance is decreased as a result of execution of -/// local effects. 
-#[test] -fn limited_teleport_assets_works() { - let balances = vec![ - (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), - ]; - new_test_ext_with_balances(balances).execute_with(|| { - let weight = BaseXcmWeight::get() * 3; - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); - let dest: MultiLocation = AccountId32 { network: None, id: BOB.into() }.into(); - assert_ok!(XcmPallet::limited_teleport_assets( - RuntimeOrigin::signed(ALICE), - Box::new(RelayLocation::get().into()), - Box::new(dest.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - WeightLimit::Limited(Weight::from_parts(5000, 5000)), - )); - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); - assert_eq!( - sent_xcm(), - vec![( - RelayLocation::get().into(), - Xcm(vec![ - ReceiveTeleportedAsset((Here, SEND_AMOUNT).into()), - ClearOrigin, - buy_limited_execution((Here, SEND_AMOUNT), Weight::from_parts(5000, 5000)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]), - )] - ); - let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1); - let _check_v2_ok: xcm::v2::Xcm<()> = versioned_sent.try_into().unwrap(); - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - }); -} - -/// Test `limited_teleport_assets` with unlimited weight -/// -/// Asserts that the sender's balance is decreased as a result of execution of -/// local effects. -#[test] -fn unlimited_teleport_assets_works() { - let balances = vec![ - (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), - ]; - new_test_ext_with_balances(balances).execute_with(|| { - let weight = BaseXcmWeight::get() * 3; - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); - let dest: MultiLocation = AccountId32 { network: None, id: BOB.into() }.into(); - assert_ok!(XcmPallet::limited_teleport_assets( - RuntimeOrigin::signed(ALICE), - Box::new(RelayLocation::get().into()), - Box::new(dest.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - WeightLimit::Unlimited, - )); - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); - assert_eq!( - sent_xcm(), - vec![( - RelayLocation::get().into(), - Xcm(vec![ - ReceiveTeleportedAsset((Here, SEND_AMOUNT).into()), - ClearOrigin, - buy_execution((Here, SEND_AMOUNT)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]), - )] - ); - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - }); -} - -/// Test `reserve_transfer_assets` -/// -/// Asserts that the sender's balance is decreased and the beneficiary's balance -/// is increased. Verifies the correct message is sent and event is emitted. 
-#[test] -fn reserve_transfer_assets_works() { - let balances = vec![ - (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), - ]; - new_test_ext_with_balances(balances).execute_with(|| { - let weight = BaseXcmWeight::get() * 2; - let dest: MultiLocation = Junction::AccountId32 { network: None, id: ALICE.into() }.into(); - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); - assert_ok!(XcmPallet::reserve_transfer_assets( - RuntimeOrigin::signed(ALICE), - Box::new(Parachain(PARA_ID).into()), - Box::new(dest.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - )); - // Alice spent amount - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - SEND_AMOUNT); - // Destination account (parachain account) has amount - let para_acc: AccountId = ParaId::from(PARA_ID).into_account_truncating(); - assert_eq!(Balances::free_balance(para_acc), INITIAL_BALANCE + SEND_AMOUNT); - assert_eq!( - sent_xcm(), - vec![( - Parachain(PARA_ID).into(), - Xcm(vec![ - ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), - ClearOrigin, - buy_execution((Parent, SEND_AMOUNT)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]), - )] - ); - let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1); - let _check_v2_ok: xcm::v2::Xcm<()> = versioned_sent.try_into().unwrap(); - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - }); -} - -/// Test `reserve_transfer_assets_with_paid_router_works` -/// -/// Asserts that the sender's balance is decreased and the beneficiary's balance -/// is increased. Verifies the correct message is sent and event is emitted. -/// Verifies that XCM router fees (`SendXcm::validate` -> `MultiAssets`) are withdrawn from correct -/// user account and deposited to a correct target account (`XcmFeesTargetAccount`). 
-#[test] -fn reserve_transfer_assets_with_paid_router_works() { - let user_account = AccountId::from(XCM_FEES_NOT_WAIVED_USER_ACCOUNT); - let paid_para_id = Para3000::get(); - let balances = vec![ - (user_account.clone(), INITIAL_BALANCE), - (ParaId::from(paid_para_id).into_account_truncating(), INITIAL_BALANCE), - (XcmFeesTargetAccount::get(), INITIAL_BALANCE), - ]; - new_test_ext_with_balances(balances).execute_with(|| { - let xcm_router_fee_amount = Para3000PaymentAmount::get(); - let weight = BaseXcmWeight::get() * 2; - let dest: MultiLocation = - Junction::AccountId32 { network: None, id: user_account.clone().into() }.into(); - assert_eq!(Balances::total_balance(&user_account), INITIAL_BALANCE); - assert_ok!(XcmPallet::reserve_transfer_assets( - RuntimeOrigin::signed(user_account.clone()), - Box::new(Parachain(paid_para_id).into()), - Box::new(dest.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - )); - // check event - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - - // XCM_FEES_NOT_WAIVED_USER_ACCOUNT spent amount - assert_eq!( - Balances::free_balance(user_account), - INITIAL_BALANCE - SEND_AMOUNT - xcm_router_fee_amount - ); - // Destination account (parachain account) has amount - let para_acc: AccountId = ParaId::from(paid_para_id).into_account_truncating(); - assert_eq!(Balances::free_balance(para_acc), INITIAL_BALANCE + SEND_AMOUNT); - // XcmFeesTargetAccount where should lend xcm_router_fee_amount - assert_eq!( - Balances::free_balance(XcmFeesTargetAccount::get()), - INITIAL_BALANCE + xcm_router_fee_amount - ); - assert_eq!( - sent_xcm(), - vec![( - Parachain(paid_para_id).into(), - Xcm(vec![ - ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), - ClearOrigin, - buy_execution((Parent, SEND_AMOUNT)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]), - )] - ); - let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1); - let _check_v2_ok: xcm::v2::Xcm<()> = versioned_sent.try_into().unwrap(); - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - }); -} - -/// Test `limited_reserve_transfer_assets` -/// -/// Asserts that the sender's balance is decreased and the beneficiary's balance -/// is increased. Verifies the correct message is sent and event is emitted. 
-#[test] -fn limited_reserve_transfer_assets_works() { - let balances = vec![ - (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), - ]; - new_test_ext_with_balances(balances).execute_with(|| { - let weight = BaseXcmWeight::get() * 2; - let dest: MultiLocation = Junction::AccountId32 { network: None, id: ALICE.into() }.into(); - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); - assert_ok!(XcmPallet::limited_reserve_transfer_assets( - RuntimeOrigin::signed(ALICE), - Box::new(Parachain(PARA_ID).into()), - Box::new(dest.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - WeightLimit::Limited(Weight::from_parts(5000, 5000)), - )); - // Alice spent amount - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - SEND_AMOUNT); - // Destination account (parachain account) has amount - let para_acc: AccountId = ParaId::from(PARA_ID).into_account_truncating(); - assert_eq!(Balances::free_balance(para_acc), INITIAL_BALANCE + SEND_AMOUNT); - assert_eq!( - sent_xcm(), - vec![( - Parachain(PARA_ID).into(), - Xcm(vec![ - ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), - ClearOrigin, - buy_limited_execution((Parent, SEND_AMOUNT), Weight::from_parts(5000, 5000)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]), - )] - ); - let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1); - let _check_v2_ok: xcm::v2::Xcm<()> = versioned_sent.try_into().unwrap(); - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - }); -} - -/// Test `limited_reserve_transfer_assets` with unlimited weight purchasing -/// -/// Asserts that the sender's balance is decreased and the beneficiary's balance -/// is increased. Verifies the correct message is sent and event is emitted. 
-#[test] -fn unlimited_reserve_transfer_assets_works() { - let balances = vec![ - (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), - ]; - new_test_ext_with_balances(balances).execute_with(|| { - let weight = BaseXcmWeight::get() * 2; - let dest: MultiLocation = Junction::AccountId32 { network: None, id: ALICE.into() }.into(); - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); - assert_ok!(XcmPallet::limited_reserve_transfer_assets( - RuntimeOrigin::signed(ALICE), - Box::new(Parachain(PARA_ID).into()), - Box::new(dest.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - WeightLimit::Unlimited, - )); - // Alice spent amount - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - SEND_AMOUNT); - // Destination account (parachain account) has amount - let para_acc: AccountId = ParaId::from(PARA_ID).into_account_truncating(); - assert_eq!(Balances::free_balance(para_acc), INITIAL_BALANCE + SEND_AMOUNT); - assert_eq!( - sent_xcm(), - vec![( - Parachain(PARA_ID).into(), - Xcm(vec![ - ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), - ClearOrigin, - buy_execution((Parent, SEND_AMOUNT)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]), - )] - ); - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - }); -} - /// Test local execution of XCM /// /// Asserts that the sender's balance is decreased and the beneficiary's balance @@ -692,7 +359,7 @@ fn unlimited_reserve_transfer_assets_works() { fn execute_withdraw_to_deposit_works() { let balances = vec![ (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), + (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), ]; new_test_ext_with_balances(balances).execute_with(|| { let weight = BaseXcmWeight::get() * 3; diff --git a/polkadot/xcm/src/v3/multilocation.rs b/polkadot/xcm/src/v3/multilocation.rs index 8a1575d9bc9..89e25984443 100644 --- a/polkadot/xcm/src/v3/multilocation.rs +++ b/polkadot/xcm/src/v3/multilocation.rs @@ -444,6 +444,21 @@ impl MultiLocation { } } } + + /// Return the MultiLocation subsection identifying the chain that `self` points to. 
+ pub fn chain_location(&self) -> MultiLocation { + let mut clone = *self; + // start popping junctions until we reach chain identifier + while let Some(j) = clone.last() { + if matches!(j, Junction::Parachain(_) | Junction::GlobalConsensus(_)) { + // return chain subsection + return clone + } else { + (clone, _) = clone.split_last_interior(); + } + } + MultiLocation::new(clone.parents, Junctions::Here) + } } impl TryFrom for MultiLocation { @@ -674,6 +689,57 @@ mod tests { assert_eq!(iter.next_back(), None); } + #[test] + fn chain_location_works() { + // Relay-chain or parachain context pointing to local resource, + let relay_to_local = MultiLocation::new(0, (PalletInstance(42), GeneralIndex(42))); + assert_eq!(relay_to_local.chain_location(), MultiLocation::here()); + + // Relay-chain context pointing to child parachain, + let relay_to_child = + MultiLocation::new(0, (Parachain(42), PalletInstance(42), GeneralIndex(42))); + let expected = MultiLocation::new(0, Parachain(42)); + assert_eq!(relay_to_child.chain_location(), expected); + + // Relay-chain context pointing to different consensus relay, + let relay_to_remote_relay = + MultiLocation::new(1, (GlobalConsensus(Kusama), PalletInstance(42), GeneralIndex(42))); + let expected = MultiLocation::new(1, GlobalConsensus(Kusama)); + assert_eq!(relay_to_remote_relay.chain_location(), expected); + + // Relay-chain context pointing to different consensus parachain, + let relay_to_remote_para = MultiLocation::new( + 1, + (GlobalConsensus(Kusama), Parachain(42), PalletInstance(42), GeneralIndex(42)), + ); + let expected = MultiLocation::new(1, (GlobalConsensus(Kusama), Parachain(42))); + assert_eq!(relay_to_remote_para.chain_location(), expected); + + // Parachain context pointing to relay chain, + let para_to_relay = MultiLocation::new(1, (PalletInstance(42), GeneralIndex(42))); + assert_eq!(para_to_relay.chain_location(), MultiLocation::parent()); + + // Parachain context pointing to sibling parachain, + let para_to_sibling = + MultiLocation::new(1, (Parachain(42), PalletInstance(42), GeneralIndex(42))); + let expected = MultiLocation::new(1, Parachain(42)); + assert_eq!(para_to_sibling.chain_location(), expected); + + // Parachain context pointing to different consensus relay, + let para_to_remote_relay = + MultiLocation::new(2, (GlobalConsensus(Kusama), PalletInstance(42), GeneralIndex(42))); + let expected = MultiLocation::new(2, GlobalConsensus(Kusama)); + assert_eq!(para_to_remote_relay.chain_location(), expected); + + // Parachain context pointing to different consensus parachain, + let para_to_remote_para = MultiLocation::new( + 2, + (GlobalConsensus(Kusama), Parachain(42), PalletInstance(42), GeneralIndex(42)), + ); + let expected = MultiLocation::new(2, (GlobalConsensus(Kusama), Parachain(42))); + assert_eq!(para_to_remote_para.chain_location(), expected); + } + #[test] fn conversion_from_other_types_works() { use crate::v2; diff --git a/polkadot/xcm/xcm-builder/src/barriers.rs b/polkadot/xcm/xcm-builder/src/barriers.rs index 3b13cab2c1e..c2b62751c68 100644 --- a/polkadot/xcm/xcm-builder/src/barriers.rs +++ b/polkadot/xcm/xcm-builder/src/barriers.rs @@ -81,10 +81,15 @@ impl> ShouldExecute for AllowTopLevelPaidExecutionFro instructions[..end] .matcher() .match_next_inst(|inst| match inst { - ReceiveTeleportedAsset(..) | ReserveAssetDeposited(..) => Ok(()), - WithdrawAsset(ref assets) if assets.len() <= MAX_ASSETS_FOR_BUY_EXECUTION => Ok(()), - ClaimAsset { ref assets, .. 
} if assets.len() <= MAX_ASSETS_FOR_BUY_EXECUTION => - Ok(()), + ReceiveTeleportedAsset(ref assets) | + ReserveAssetDeposited(ref assets) | + WithdrawAsset(ref assets) | + ClaimAsset { ref assets, .. } => + if assets.len() <= MAX_ASSETS_FOR_BUY_EXECUTION { + Ok(()) + } else { + Err(ProcessMessageError::BadFormat) + }, _ => Err(ProcessMessageError::BadFormat), })? .skip_inst_while(|inst| matches!(inst, ClearOrigin))? diff --git a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs index e51bd952177..78b9284c689 100644 --- a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs +++ b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs @@ -246,11 +246,6 @@ type SovereignAccountOf = ( HashedDescription>, ); -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parachain(1000).into()); -} - impl pallet_xcm::Config for Test { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = EnsureXcmOrigin; @@ -274,8 +269,6 @@ impl pallet_xcm::Config for Test { type MaxRemoteLockConsumers = frame_support::traits::ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } diff --git a/polkadot/xcm/xcm-builder/tests/mock/mod.rs b/polkadot/xcm/xcm-builder/tests/mock/mod.rs index 5fcba5e2f54..4f183c7a15b 100644 --- a/polkadot/xcm/xcm-builder/tests/mock/mod.rs +++ b/polkadot/xcm/xcm-builder/tests/mock/mod.rs @@ -210,11 +210,6 @@ impl xcm_executor::Config for XcmConfig { pub type LocalOriginToLocation = SignedToAccountId32; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Here.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type UniversalLocation = UniversalLocation; @@ -239,8 +234,6 @@ impl pallet_xcm::Config for Runtime { type MaxRemoteLockConsumers = frame_support::traits::ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } diff --git a/polkadot/xcm/xcm-executor/Cargo.toml b/polkadot/xcm/xcm-executor/Cargo.toml index 9f0caa80617..d5edb1ea0f5 100644 --- a/polkadot/xcm/xcm-executor/Cargo.toml +++ b/polkadot/xcm/xcm-executor/Cargo.toml @@ -10,6 +10,7 @@ version = "1.0.0" impl-trait-for-tuples = "0.2.2" environmental = { version = "1.1.4", default-features = false } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive", "serde"] } xcm = { package = "staging-xcm", path = "..", default-features = false } sp-std = { path = "../../../substrate/primitives/std", default-features = false } sp-io = { path = "../../../substrate/primitives/io", default-features = false } @@ -34,6 +35,7 @@ std = [ "frame-support/std", "log/std", "parity-scale-codec/std", + "scale-info/std", "sp-arithmetic/std", "sp-core/std", "sp-io/std", diff --git a/polkadot/xcm/xcm-executor/src/lib.rs b/polkadot/xcm/xcm-executor/src/lib.rs index e43d7a04899..ac256ea1489 100644 --- a/polkadot/xcm/xcm-executor/src/lib.rs +++ b/polkadot/xcm/xcm-executor/src/lib.rs @@ -32,7 +32,7 @@ pub mod traits; use traits::{ validate_export, AssetExchange, AssetLock, CallDispatcher, ClaimAssets, ConvertOrigin, DropAssets, Enact, ExportXcm, FeeManager, FeeReason, OnResponse, Properties, 
ShouldExecute, - TransactAsset, VersionChangeNotifier, WeightBounds, WeightTrader, + TransactAsset, VersionChangeNotifier, WeightBounds, WeightTrader, XcmAssetTransfers, }; mod assets; @@ -254,6 +254,12 @@ impl ExecuteXcm for XcmExecutor XcmAssetTransfers for XcmExecutor { + type IsReserve = Config::IsReserve; + type IsTeleporter = Config::IsTeleporter; + type AssetTransactor = Config::AssetTransactor; +} + #[derive(Debug)] pub struct ExecutorError { pub index: u32, diff --git a/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs b/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs new file mode 100644 index 00000000000..5fdc9b15e01 --- /dev/null +++ b/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs @@ -0,0 +1,90 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::traits::TransactAsset; +use frame_support::traits::ContainsPair; +use scale_info::TypeInfo; +use sp_runtime::codec::{Decode, Encode}; +use xcm::prelude::*; + +/// Errors related to determining asset transfer support. +#[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, Debug, TypeInfo)] +pub enum Error { + /// Invalid non-concrete asset. + NotConcrete, + /// Reserve chain could not be determined for assets. + UnknownReserve, +} + +/// Specify which type of asset transfer is required for a particular `(asset, dest)` combination. +#[derive(Copy, Clone, PartialEq, Debug)] +pub enum TransferType { + /// should teleport `asset` to `dest` + Teleport, + /// should reserve-transfer `asset` to `dest`, using local chain as reserve + LocalReserve, + /// should reserve-transfer `asset` to `dest`, using `dest` as reserve + DestinationReserve, + /// should reserve-transfer `asset` to `dest`, using remote chain `MultiLocation` as reserve + RemoteReserve(MultiLocation), +} + +/// A trait for identifying asset transfer type based on `IsTeleporter` and `IsReserve` +/// configurations. +pub trait XcmAssetTransfers { + /// Combinations of (Asset, Location) pairs which we trust as reserves. Meaning + /// reserve-based-transfers are to be used for assets matching this filter. + type IsReserve: ContainsPair; + + /// Combinations of (Asset, Location) pairs which we trust as teleporters. Meaning teleports are + /// to be used for assets matching this filter. + type IsTeleporter: ContainsPair; + + /// How to withdraw and deposit an asset. + type AssetTransactor: TransactAsset; + + /// Determine transfer type to be used for transferring `asset` from local chain to `dest`. 
+ fn determine_for(asset: &MultiAsset, dest: &MultiLocation) -> Result { + if Self::IsTeleporter::contains(asset, dest) { + // we trust destination for teleporting asset + return Ok(TransferType::Teleport) + } else if Self::IsReserve::contains(asset, dest) { + // we trust destination as asset reserve location + return Ok(TransferType::DestinationReserve) + } + + // try to determine reserve location based on asset id/location + let asset_location = match asset.id { + Concrete(location) => Ok(location.chain_location()), + _ => Err(Error::NotConcrete), + }?; + if asset_location == MultiLocation::here() || + Self::IsTeleporter::contains(asset, &asset_location) + { + // if the asset is local, then it's a local reserve + // it's also a local reserve if the asset's location is not `here` but it's a location + // where it can be teleported to `here` => local reserve + Ok(TransferType::LocalReserve) + } else if Self::IsReserve::contains(asset, &asset_location) { + // remote location that is recognized as reserve location for asset + Ok(TransferType::RemoteReserve(asset_location)) + } else { + // remote location that is not configured either as teleporter or reserve => cannot + // determine asset reserve + Err(Error::UnknownReserve) + } + } +} diff --git a/polkadot/xcm/xcm-executor/src/traits/mod.rs b/polkadot/xcm/xcm-executor/src/traits/mod.rs index a9439968fa6..71e75c77e93 100644 --- a/polkadot/xcm/xcm-executor/src/traits/mod.rs +++ b/polkadot/xcm/xcm-executor/src/traits/mod.rs @@ -20,10 +20,12 @@ mod conversion; pub use conversion::{CallDispatcher, ConvertLocation, ConvertOrigin, WithOriginFilter}; mod drop_assets; pub use drop_assets::{ClaimAssets, DropAssets}; -mod asset_lock; -pub use asset_lock::{AssetLock, Enact, LockError}; mod asset_exchange; pub use asset_exchange::AssetExchange; +mod asset_lock; +pub use asset_lock::{AssetLock, Enact, LockError}; +mod asset_transfer; +pub use asset_transfer::{Error as AssetTransferError, TransferType, XcmAssetTransfers}; mod export; pub use export::{export_xcm, validate_export, ExportXcm}; mod fee_manager; diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain.rs b/polkadot/xcm/xcm-simulator/example/src/parachain.rs index fa9d3300619..9f0411970ce 100644 --- a/polkadot/xcm/xcm-simulator/example/src/parachain.rs +++ b/polkadot/xcm/xcm-simulator/example/src/parachain.rs @@ -399,11 +399,6 @@ impl mock_msg_queue::Config for Runtime { pub type LocalOriginToLocation = SignedToAccountId32; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - pub struct TrustedLockerCase(PhantomData); impl> ContainsPair for TrustedLockerCase @@ -443,8 +438,6 @@ impl pallet_xcm::Config for Runtime { type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } diff --git a/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs b/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs index 0fba4cb270d..bdd7ff6d3ea 100644 --- a/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs +++ b/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs @@ -199,11 +199,6 @@ impl Config for XcmConfig { pub type LocalOriginToLocation = SignedToAccountId32; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! 
{ - pub ReachableDest: Option = Some(Parachain(1).into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; @@ -228,8 +223,6 @@ impl pallet_xcm::Config for Runtime { type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs index f9ad0252285..41234837aca 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs +++ b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs @@ -313,11 +313,6 @@ impl mock_msg_queue::Config for Runtime { pub type LocalOriginToLocation = SignedToAccountId32; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = EnsureXcmOrigin; @@ -341,8 +336,6 @@ impl pallet_xcm::Config for Runtime { type MaxRemoteLockConsumers = frame_support::traits::ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs index 756cf4803b1..c9a57db970a 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs +++ b/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs @@ -163,11 +163,6 @@ impl Config for XcmConfig { pub type LocalOriginToLocation = SignedToAccountId32; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! 
{ - pub ReachableDest: Option = Some(Parachain(1).into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; @@ -192,8 +187,6 @@ impl pallet_xcm::Config for Runtime { type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } -- GitLab From c7bd8804389e4b9b7f9d0c39b06094aebb303e5d Mon Sep 17 00:00:00 2001 From: Lulu Date: Mon, 13 Nov 2023 19:10:12 +0000 Subject: [PATCH 12/74] Add CI to claim crates (#2299) --- .github/workflows/claim-crates.yml | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 .github/workflows/claim-crates.yml diff --git a/.github/workflows/claim-crates.yml b/.github/workflows/claim-crates.yml new file mode 100644 index 00000000000..a1d28d42828 --- /dev/null +++ b/.github/workflows/claim-crates.yml @@ -0,0 +1,25 @@ +name: Claim Crates + +on: + push: + branches: + - master + +jobs: + claim-crates: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 + + - name: Rust Cache + uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 + with: + cache-on-failure: true + + - name: install parity-publish + run: cargo install parity-publish@0.2.0 + + - name: parity-publish claim + env: + PARITY_PUBLISH_CRATESIO_TOKEN: ${{ secrets.CRATESIO_PUBLISH_CLAIM_TOKEN }} + run: parity-publish claim -- GitLab From f332d6881d4275886f5d738ae9eee96d1ced9c94 Mon Sep 17 00:00:00 2001 From: Assem Date: Mon, 13 Nov 2023 20:34:27 +0100 Subject: [PATCH 13/74] =?UTF-8?q?doc(client/cli/src/arg=5Fenums.rs):=20fix?= =?UTF-8?q?=20typo=20=E2=9C=8D=EF=B8=8F=20(#2298)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- substrate/client/cli/src/arg_enums.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substrate/client/cli/src/arg_enums.rs b/substrate/client/cli/src/arg_enums.rs index c55b97675da..d4a4b7cfdf6 100644 --- a/substrate/client/cli/src/arg_enums.rs +++ b/substrate/client/cli/src/arg_enums.rs @@ -225,7 +225,7 @@ pub enum OffchainWorkerEnabled { #[derive(Debug, Clone, Copy, ValueEnum, PartialEq)] #[value(rename_all = "kebab-case")] pub enum SyncMode { - /// Full sync. Download end verify all blocks. + /// Full sync. Download and verify all blocks. Full, /// Download blocks without executing them. Download latest state with proofs. Fast, -- GitLab From 8d2637905ba920dd1f0e8f1212ec98e45420f514 Mon Sep 17 00:00:00 2001 From: Javier Bullrich Date: Mon, 13 Nov 2023 21:17:36 +0100 Subject: [PATCH 14/74] review-bot: trigger only on review approvals (#2289) Moved the review event of review-bot to only be triggered in approvals. Because we only update the required reviews when someone approves, this will stop the bot from immediately requesting a new review when someone comments or request changes as they should have been already notified in the first batch. 
--- .github/workflows/review-trigger.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/review-trigger.yml b/.github/workflows/review-trigger.yml index 1ae6b79ffbd..2810ea356e6 100644 --- a/.github/workflows/review-trigger.yml +++ b/.github/workflows/review-trigger.yml @@ -13,7 +13,8 @@ on: jobs: trigger-review-bot: - if: github.event.pull_request.draft != true + # (It is not a draft) && (it is not a review || it is an approving review) + if: ${{ github.event.pull_request.draft != true && (github.event_name != 'pull_request_review' || (github.event.review && github.event.review.state == 'APPROVED')) }} runs-on: ubuntu-latest name: trigger review bot steps: -- GitLab From 689b9d91c7e95660682302510b89a5f296098a10 Mon Sep 17 00:00:00 2001 From: Alin Dima Date: Tue, 14 Nov 2023 10:37:41 +0200 Subject: [PATCH 15/74] cumulus-pov-recovery: check pov_hash instead of reencoding data (#2287) Collators were previously reencoding the available data and checking the erasure root. Replace that with just checking the PoV hash, which consumes much less CPU and takes less time. We also don't need to check the `PersistedValidationData` hash, as collators don't use it. Reason: https://github.com/paritytech/polkadot-sdk/issues/575#issuecomment-1806572230 After systematic chunks recovery is merged, collators will no longer do any reed-solomon encoding/decoding, which has proven to be a great CPU consumer. Signed-off-by: alindima --- .../src/active_candidate_recovery.rs | 15 +- cumulus/client/pov-recovery/src/lib.rs | 38 ++--- .../src/collator_overseer.rs | 2 +- .../network/availability-recovery/src/lib.rs | 39 ++++- .../network/availability-recovery/src/task.rs | 138 +++++++++++------- 5 files changed, 141 insertions(+), 91 deletions(-) diff --git a/cumulus/client/pov-recovery/src/active_candidate_recovery.rs b/cumulus/client/pov-recovery/src/active_candidate_recovery.rs index 322b19c796a..2c635320ff4 100644 --- a/cumulus/client/pov-recovery/src/active_candidate_recovery.rs +++ b/cumulus/client/pov-recovery/src/active_candidate_recovery.rs @@ -16,12 +16,12 @@ use sp_runtime::traits::Block as BlockT; -use polkadot_node_primitives::AvailableData; +use polkadot_node_primitives::PoV; use polkadot_node_subsystem::messages::AvailabilityRecoveryMessage; use futures::{channel::oneshot, stream::FuturesUnordered, Future, FutureExt, StreamExt}; -use std::{collections::HashSet, pin::Pin}; +use std::{collections::HashSet, pin::Pin, sync::Arc}; use crate::RecoveryHandle; @@ -30,9 +30,8 @@ use crate::RecoveryHandle; /// This handles the candidate recovery and tracks the activate recoveries. pub(crate) struct ActiveCandidateRecovery { /// The recoveries that are currently being executed. - recoveries: FuturesUnordered< - Pin)> + Send>>, - >, + recoveries: + FuturesUnordered>)> + Send>>>, /// The block hashes of the candidates currently being recovered. candidates: HashSet, recovery_handle: Box, @@ -68,7 +67,7 @@ impl ActiveCandidateRecovery { self.recoveries.push( async move { match rx.await { - Ok(Ok(res)) => (block_hash, Some(res)), + Ok(Ok(res)) => (block_hash, Some(res.pov)), Ok(Err(error)) => { tracing::debug!( target: crate::LOG_TARGET, @@ -93,8 +92,8 @@ impl ActiveCandidateRecovery { /// Waits for the next recovery. /// - /// If the returned [`AvailableData`] is `None`, it means that the recovery failed. - pub async fn wait_for_recovery(&mut self) -> (Block::Hash, Option) { + /// If the returned [`PoV`] is `None`, it means that the recovery failed. 
+ pub async fn wait_for_recovery(&mut self) -> (Block::Hash, Option>) { loop { if let Some(res) = self.recoveries.next().await { self.candidates.remove(&res.0); diff --git a/cumulus/client/pov-recovery/src/lib.rs b/cumulus/client/pov-recovery/src/lib.rs index b050bc66799..b9a140f55c6 100644 --- a/cumulus/client/pov-recovery/src/lib.rs +++ b/cumulus/client/pov-recovery/src/lib.rs @@ -51,7 +51,7 @@ use sc_consensus::import_queue::{ImportQueueService, IncomingBlock}; use sp_consensus::{BlockOrigin, BlockStatus, SyncOracle}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; -use polkadot_node_primitives::{AvailableData, POV_BOMB_LIMIT}; +use polkadot_node_primitives::{PoV, POV_BOMB_LIMIT}; use polkadot_node_subsystem::messages::AvailabilityRecoveryMessage; use polkadot_overseer::Handle as OverseerHandle; use polkadot_primitives::{ @@ -346,15 +346,11 @@ where } /// Handle a recovered candidate. - async fn handle_candidate_recovered( - &mut self, - block_hash: Block::Hash, - available_data: Option, - ) { - let available_data = match available_data { - Some(data) => { + async fn handle_candidate_recovered(&mut self, block_hash: Block::Hash, pov: Option<&PoV>) { + let pov = match pov { + Some(pov) => { self.candidates_in_retry.remove(&block_hash); - data + pov }, None => if self.candidates_in_retry.insert(block_hash) { @@ -373,18 +369,16 @@ where }, }; - let raw_block_data = match sp_maybe_compressed_blob::decompress( - &available_data.pov.block_data.0, - POV_BOMB_LIMIT, - ) { - Ok(r) => r, - Err(error) => { - tracing::debug!(target: LOG_TARGET, ?error, "Failed to decompress PoV"); + let raw_block_data = + match sp_maybe_compressed_blob::decompress(&pov.block_data.0, POV_BOMB_LIMIT) { + Ok(r) => r, + Err(error) => { + tracing::debug!(target: LOG_TARGET, ?error, "Failed to decompress PoV"); - self.reset_candidate(block_hash); - return - }, - }; + self.reset_candidate(block_hash); + return + }, + }; let block_data = match ParachainBlockData::::decode(&mut &raw_block_data[..]) { Ok(d) => d, @@ -595,10 +589,10 @@ where next_to_recover = self.candidate_recovery_queue.next_recovery().fuse() => { self.recover_candidate(next_to_recover).await; }, - (block_hash, available_data) = + (block_hash, pov) = self.active_candidate_recovery.wait_for_recovery().fuse() => { - self.handle_candidate_recovered(block_hash, available_data).await; + self.handle_candidate_recovered(block_hash, pov.as_deref()).await; }, } } diff --git a/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs b/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs index 379217e4a63..945344f85e9 100644 --- a/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs +++ b/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs @@ -102,7 +102,7 @@ fn build_overseer( let network_bridge_metrics: NetworkBridgeMetrics = Metrics::register(registry)?; let builder = Overseer::builder() .availability_distribution(DummySubsystem) - .availability_recovery(AvailabilityRecoverySubsystem::with_availability_store_skip( + .availability_recovery(AvailabilityRecoverySubsystem::for_collator( available_data_req_receiver, Metrics::register(registry)?, )) diff --git a/polkadot/node/network/availability-recovery/src/lib.rs b/polkadot/node/network/availability-recovery/src/lib.rs index 9acc48ea92e..4a658449f09 100644 --- a/polkadot/node/network/availability-recovery/src/lib.rs +++ b/polkadot/node/network/availability-recovery/src/lib.rs @@ -105,6 +105,17 @@ pub struct AvailabilityRecoverySubsystem { 
req_receiver: IncomingRequestReceiver, /// Metrics for this subsystem. metrics: Metrics, + /// The type of check to perform after available data was recovered. + post_recovery_check: PostRecoveryCheck, +} + +#[derive(Clone, PartialEq, Debug)] +/// The type of check to perform after available data was recovered. +pub enum PostRecoveryCheck { + /// Reencode the data and check erasure root. For validators. + Reencode, + /// Only check the pov hash. For collators only. + PovHash, } /// Expensive erasure coding computations that we want to run on a blocking thread. @@ -344,6 +355,7 @@ async fn launch_recovery_task( metrics: &Metrics, recovery_strategies: VecDeque::Sender>>>, bypass_availability_store: bool, + post_recovery_check: PostRecoveryCheck, ) -> error::Result<()> { let candidate_hash = receipt.hash(); let params = RecoveryParams { @@ -354,6 +366,8 @@ async fn launch_recovery_task( erasure_root: receipt.descriptor.erasure_root, metrics: metrics.clone(), bypass_availability_store, + post_recovery_check, + pov_hash: receipt.descriptor.pov_hash, }; let recovery_task = RecoveryTask::new(ctx.sender().clone(), params, recovery_strategies); @@ -390,6 +404,7 @@ async fn handle_recover( erasure_task_tx: futures::channel::mpsc::Sender, recovery_strategy_kind: RecoveryStrategyKind, bypass_availability_store: bool, + post_recovery_check: PostRecoveryCheck, ) -> error::Result<()> { let candidate_hash = receipt.hash(); @@ -486,6 +501,7 @@ async fn handle_recover( metrics, recovery_strategies, bypass_availability_store, + post_recovery_check, ) .await }, @@ -527,15 +543,17 @@ async fn query_chunk_size( #[overseer::contextbounds(AvailabilityRecovery, prefix = self::overseer)] impl AvailabilityRecoverySubsystem { - /// Create a new instance of `AvailabilityRecoverySubsystem` which never requests the - /// `AvailabilityStoreSubsystem` subsystem. - pub fn with_availability_store_skip( + /// Create a new instance of `AvailabilityRecoverySubsystem` suitable for collator nodes, + /// which never requests the `AvailabilityStoreSubsystem` subsystem and only checks the POV hash + /// instead of reencoding the available data. 
+ pub fn for_collator( req_receiver: IncomingRequestReceiver, metrics: Metrics, ) -> Self { Self { recovery_strategy_kind: RecoveryStrategyKind::BackersFirstIfSizeLower(SMALL_POV_LIMIT), bypass_availability_store: true, + post_recovery_check: PostRecoveryCheck::PovHash, req_receiver, metrics, } @@ -550,6 +568,7 @@ impl AvailabilityRecoverySubsystem { Self { recovery_strategy_kind: RecoveryStrategyKind::BackersFirstAlways, bypass_availability_store: false, + post_recovery_check: PostRecoveryCheck::Reencode, req_receiver, metrics, } @@ -563,6 +582,7 @@ impl AvailabilityRecoverySubsystem { Self { recovery_strategy_kind: RecoveryStrategyKind::ChunksAlways, bypass_availability_store: false, + post_recovery_check: PostRecoveryCheck::Reencode, req_receiver, metrics, } @@ -577,6 +597,7 @@ impl AvailabilityRecoverySubsystem { Self { recovery_strategy_kind: RecoveryStrategyKind::BackersFirstIfSizeLower(SMALL_POV_LIMIT), bypass_availability_store: false, + post_recovery_check: PostRecoveryCheck::Reencode, req_receiver, metrics, } @@ -584,8 +605,13 @@ impl AvailabilityRecoverySubsystem { async fn run(self, mut ctx: Context) -> SubsystemResult<()> { let mut state = State::default(); - let Self { mut req_receiver, metrics, recovery_strategy_kind, bypass_availability_store } = - self; + let Self { + mut req_receiver, + metrics, + recovery_strategy_kind, + bypass_availability_store, + post_recovery_check, + } = self; let (erasure_task_tx, erasure_task_rx) = futures::channel::mpsc::channel(16); let mut erasure_task_rx = erasure_task_rx.fuse(); @@ -675,7 +701,8 @@ impl AvailabilityRecoverySubsystem { &metrics, erasure_task_tx.clone(), recovery_strategy_kind.clone(), - bypass_availability_store + bypass_availability_store, + post_recovery_check.clone() ).await { gum::warn!( target: LOG_TARGET, diff --git a/polkadot/node/network/availability-recovery/src/task.rs b/polkadot/node/network/availability-recovery/src/task.rs index d5bc2da8494..f705d5c0e4c 100644 --- a/polkadot/node/network/availability-recovery/src/task.rs +++ b/polkadot/node/network/availability-recovery/src/task.rs @@ -20,7 +20,7 @@ use crate::{ futures_undead::FuturesUndead, is_chunk_valid, is_unavailable, metrics::Metrics, ErasureTask, - LOG_TARGET, + PostRecoveryCheck, LOG_TARGET, }; use futures::{channel::oneshot, SinkExt}; #[cfg(not(test))] @@ -95,6 +95,12 @@ pub struct RecoveryParams { /// Do not request data from availability-store. Useful for collators. pub bypass_availability_store: bool, + + /// The type of check to perform after available data was recovered. + pub post_recovery_check: PostRecoveryCheck, + + /// The blake2-256 hash of the PoV. 
+ pub pov_hash: Hash, } /// Intermediate/common data that must be passed between `RecoveryStrategy`s belonging to the @@ -501,39 +507,48 @@ impl RecoveryStrategy match response.await { Ok(req_res::v1::AvailableDataFetchingResponse::AvailableData(data)) => { - let (reencode_tx, reencode_rx) = oneshot::channel(); - self.params - .erasure_task_tx - .send(ErasureTask::Reencode( - common_params.n_validators, - common_params.erasure_root, - data, - reencode_tx, - )) - .await - .map_err(|_| RecoveryError::ChannelClosed)?; - - let reencode_response = - reencode_rx.await.map_err(|_| RecoveryError::ChannelClosed)?; - - if let Some(data) = reencode_response { - gum::trace!( - target: LOG_TARGET, - candidate_hash = ?common_params.candidate_hash, - "Received full data", - ); + let maybe_data = match common_params.post_recovery_check { + PostRecoveryCheck::Reencode => { + let (reencode_tx, reencode_rx) = oneshot::channel(); + self.params + .erasure_task_tx + .send(ErasureTask::Reencode( + common_params.n_validators, + common_params.erasure_root, + data, + reencode_tx, + )) + .await + .map_err(|_| RecoveryError::ChannelClosed)?; + + reencode_rx.await.map_err(|_| RecoveryError::ChannelClosed)? + }, + PostRecoveryCheck::PovHash => + (data.pov.hash() == common_params.pov_hash).then_some(data), + }; - return Ok(data) - } else { - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?common_params.candidate_hash, - ?validator_index, - "Invalid data response", - ); + match maybe_data { + Some(data) => { + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + "Received full data", + ); - // it doesn't help to report the peer with req/res. - } + return Ok(data) + }, + None => { + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + ?validator_index, + "Invalid data response", + ); + + // it doesn't help to report the peer with req/res. + // we'll try the next backer. + }, + }; }, Ok(req_res::v1::AvailableDataFetchingResponse::NoSuchData) => {}, Err(e) => gum::debug!( @@ -647,22 +662,43 @@ impl FetchChunks { match available_data_response { Ok(data) => { - // Send request to re-encode the chunks and check merkle root. - let (reencode_tx, reencode_rx) = oneshot::channel(); - self.erasure_task_tx - .send(ErasureTask::Reencode( - common_params.n_validators, - common_params.erasure_root, - data, - reencode_tx, - )) - .await - .map_err(|_| RecoveryError::ChannelClosed)?; - - let reencode_response = - reencode_rx.await.map_err(|_| RecoveryError::ChannelClosed)?; - - if let Some(data) = reencode_response { + let maybe_data = match common_params.post_recovery_check { + PostRecoveryCheck::Reencode => { + // Send request to re-encode the chunks and check merkle root. 
+ let (reencode_tx, reencode_rx) = oneshot::channel(); + self.erasure_task_tx + .send(ErasureTask::Reencode( + common_params.n_validators, + common_params.erasure_root, + data, + reencode_tx, + )) + .await + .map_err(|_| RecoveryError::ChannelClosed)?; + + reencode_rx.await.map_err(|_| RecoveryError::ChannelClosed)?.or_else(|| { + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + erasure_root = ?common_params.erasure_root, + "Data recovery error - root mismatch", + ); + None + }) + }, + PostRecoveryCheck::PovHash => + (data.pov.hash() == common_params.pov_hash).then_some(data).or_else(|| { + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + pov_hash = ?common_params.pov_hash, + "Data recovery error - PoV hash mismatch", + ); + None + }), + }; + + if let Some(data) = maybe_data { gum::trace!( target: LOG_TARGET, candidate_hash = ?common_params.candidate_hash, @@ -673,12 +709,6 @@ impl FetchChunks { Ok(data) } else { recovery_duration.map(|rd| rd.stop_and_discard()); - gum::trace!( - target: LOG_TARGET, - candidate_hash = ?common_params.candidate_hash, - erasure_root = ?common_params.erasure_root, - "Data recovery error - root mismatch", - ); Err(RecoveryError::Invalid) } -- GitLab From b371c3574190ace0d8dd89b7970a388ad3fa8a6a Mon Sep 17 00:00:00 2001 From: drskalman <35698397+drskalman@users.noreply.github.com> Date: Tue, 14 Nov 2023 08:39:44 +0000 Subject: [PATCH 16/74] Fix `ecdsa_bls` verify in BEEFY primitives (#2066) BEEFY ECDSA signatures are on keccak has of the messages. As such we can not simply call `EcdsaBlsPair::verify(signature.as_inner_ref(), msg, self.as_inner_ref())` because that invokes ecdsa default verification which perfoms blake2 hash which we don't want. This bring up the second issue makes: This makes `sign` and `verify` function in `pair_crypto` useless, at least for BEEFY use case. Moreover, there is no obvious clean way to generate the signature given that pair_crypto does not exposes `sign_prehashed`. You could in theory query the keystore for the pair (could you?), invoke `to_raw` and re-generate each sub-pair and sign using each. But that sounds extremely anticlimactic and will be frow upon by auditors . So I appreciate any alternative suggestion. --------- Co-authored-by: Davide Galassi Co-authored-by: Robert Hambrock --- .../primitives/consensus/beefy/src/lib.rs | 34 ++++++--- .../primitives/core/src/paired_crypto.rs | 76 ++++++++++++++++++- 2 files changed, 96 insertions(+), 14 deletions(-) diff --git a/substrate/primitives/consensus/beefy/src/lib.rs b/substrate/primitives/consensus/beefy/src/lib.rs index 5bdf8ce010a..e31c53237be 100644 --- a/substrate/primitives/consensus/beefy/src/lib.rs +++ b/substrate/primitives/consensus/beefy/src/lib.rs @@ -133,7 +133,7 @@ pub mod bls_crypto { ::Output: Into<[u8; 32]>, { fn verify(&self, signature: &::Signature, msg: &[u8]) -> bool { - // `w3f-bls` library uses IETF hashing standard and as such does not exposes + // `w3f-bls` library uses IETF hashing standard and as such does not expose // a choice of hash to field function. // We are directly calling into the library to avoid introducing new host call. 
// and because BeefyAuthorityId::verify is being called in the runtime so we don't have @@ -157,7 +157,7 @@ pub mod bls_crypto { pub mod ecdsa_bls_crypto { use super::{BeefyAuthorityId, Hash, RuntimeAppPublic, KEY_TYPE}; use sp_application_crypto::{app_crypto, ecdsa_bls377}; - use sp_core::{crypto::Wraps, ecdsa_bls377::Pair as EcdsaBlsPair, Pair as _}; + use sp_core::{crypto::Wraps, ecdsa_bls377::Pair as EcdsaBlsPair}; app_crypto!(ecdsa_bls377, KEY_TYPE); @@ -167,17 +167,24 @@ pub mod ecdsa_bls_crypto { /// Signature for a BEEFY authority using (ECDSA,BLS) as its crypto. pub type AuthoritySignature = Signature; - impl BeefyAuthorityId for AuthorityId + impl BeefyAuthorityId for AuthorityId where - ::Output: Into<[u8; 32]>, + H: Hash, + H::Output: Into<[u8; 32]>, { fn verify(&self, signature: &::Signature, msg: &[u8]) -> bool { - // `w3f-bls` library uses IETF hashing standard and as such does not exposes - // a choice of hash to field function. - // We are directly calling into the library to avoid introducing new host call. - // and because BeefyAuthorityId::verify is being called in the runtime so we don't have - - EcdsaBlsPair::verify(signature.as_inner_ref(), msg, self.as_inner_ref()) + // We can not simply call + // `EcdsaBlsPair::verify(signature.as_inner_ref(), msg, self.as_inner_ref())` + // because that invokes ECDSA default verification which perfoms Blake2b hash + // which we don't want. This is because ECDSA signatures are meant to be verified + // on Ethereum network where Keccak hasher is significantly cheaper than Blake2b. + // See Figure 3 of [OnSc21](https://www.scitepress.org/Papers/2021/106066/106066.pdf) + // for comparison. + EcdsaBlsPair::verify_with_hasher::( + signature.as_inner_ref(), + msg, + self.as_inner_ref(), + ) } } } @@ -257,6 +264,7 @@ pub enum ConsensusLog { /// /// A vote message is a direct vote created by a BEEFY node on every voting round /// and is gossiped to its peers. +// TODO: Remove `Signature` generic type, instead get it from `Id::Signature`. #[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)] pub struct VoteMessage { /// Commit to information extracted from a finalized block @@ -507,11 +515,15 @@ mod tests { let msg = &b"test-message"[..]; let (pair, _) = ecdsa_bls_crypto::Pair::generate(); - let signature: ecdsa_bls_crypto::Signature = pair.as_inner_ref().sign(&msg).into(); + let signature: ecdsa_bls_crypto::Signature = + pair.as_inner_ref().sign_with_hasher::(&msg).into(); // Verification works if same hashing function is used when signing and verifying. 
assert!(BeefyAuthorityId::::verify(&pair.public(), &signature, msg)); + // Verification doesn't work if we verify function provided by pair_crypto implementation + assert!(!ecdsa_bls_crypto::Pair::verify(&signature, msg, &pair.public())); + // Other public key doesn't work let (other_pair, _) = ecdsa_bls_crypto::Pair::generate(); assert!(!BeefyAuthorityId::::verify(&other_pair.public(), &signature, msg,)); diff --git a/substrate/primitives/core/src/paired_crypto.rs b/substrate/primitives/core/src/paired_crypto.rs index a97b657e757..edf6156f268 100644 --- a/substrate/primitives/core/src/paired_crypto.rs +++ b/substrate/primitives/core/src/paired_crypto.rs @@ -39,7 +39,13 @@ use sp_std::convert::TryFrom; /// ECDSA and BLS12-377 paired crypto scheme #[cfg(feature = "bls-experimental")] pub mod ecdsa_bls377 { - use crate::{bls377, crypto::CryptoTypeId, ecdsa}; + #[cfg(feature = "full_crypto")] + use crate::Hasher; + use crate::{ + bls377, + crypto::{CryptoTypeId, Pair as PairT, UncheckedFrom}, + ecdsa, + }; /// An identifier used to match public keys against BLS12-377 keys pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"ecb7"); @@ -71,6 +77,60 @@ pub mod ecdsa_bls377 { impl super::CryptoType for Pair { type Pair = Pair; } + + #[cfg(feature = "full_crypto")] + impl Pair { + /// Hashes the `message` with the specified [`Hasher`] before signing sith the ECDSA secret + /// component. + /// + /// The hasher does not affect the BLS12-377 component. This generates BLS12-377 Signature + /// according to IETF standard. + pub fn sign_with_hasher(&self, message: &[u8]) -> Signature + where + H: Hasher, + H::Out: Into<[u8; 32]>, + { + let msg_hash = H::hash(message).into(); + + let mut raw: [u8; SIGNATURE_LEN] = [0u8; SIGNATURE_LEN]; + raw[..ecdsa::SIGNATURE_SERIALIZED_SIZE] + .copy_from_slice(self.left.sign_prehashed(&msg_hash).as_ref()); + raw[ecdsa::SIGNATURE_SERIALIZED_SIZE..] + .copy_from_slice(self.right.sign(message).as_ref()); + ::Signature::unchecked_from(raw) + } + + /// Hashes the `message` with the specified [`Hasher`] before verifying with the ECDSA + /// public component. + /// + /// The hasher does not affect the the BLS12-377 component. This verifies whether the + /// BLS12-377 signature was hashed and signed according to IETF standard + pub fn verify_with_hasher(sig: &Signature, message: &[u8], public: &Public) -> bool + where + H: Hasher, + H::Out: Into<[u8; 32]>, + { + let msg_hash = H::hash(message).into(); + + let Ok(left_pub) = public.0[..ecdsa::PUBLIC_KEY_SERIALIZED_SIZE].try_into() else { + return false + }; + let Ok(left_sig) = sig.0[0..ecdsa::SIGNATURE_SERIALIZED_SIZE].try_into() else { + return false + }; + if !ecdsa::Pair::verify_prehashed(&left_sig, &msg_hash, &left_pub) { + return false + } + + let Ok(right_pub) = public.0[ecdsa::PUBLIC_KEY_SERIALIZED_SIZE..].try_into() else { + return false + }; + let Ok(right_sig) = sig.0[ecdsa::SIGNATURE_SERIALIZED_SIZE..].try_into() else { + return false + }; + bls377::Pair::verify(&right_sig, message.as_ref(), &right_pub) + } + } } /// Secure seed length. 
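To make the intended usage concrete, the snippet below is a minimal sketch (not part of the diff) of signing and verifying with the hasher-aware API introduced here. It assumes `sp_core::ecdsa_bls377::Pair`, the `KeccakHasher` used by the in-crate tests, and the `full_crypto`/`bls-experimental` features; it mirrors the new `sign_and_verify_with_hasher_works` test rather than adding any behaviour.

```rust
// Sketch only: `KeccakHasher` and the `sign_with_hasher`/`verify_with_hasher`
// methods are assumed from this patch and sp-core's test setup.
use sp_core::{ecdsa_bls377::Pair, KeccakHasher, Pair as PairT};

fn keccak_sign_and_verify() {
    let (pair, _seed) = Pair::generate();
    let msg = b"beefy payload";

    // The ECDSA half signs the Keccak-256 hash of `msg`; the BLS half signs per the IETF standard.
    let signature = pair.sign_with_hasher::<KeccakHasher>(msg);

    // Succeeds only when verification uses the same hasher.
    assert!(Pair::verify_with_hasher::<KeccakHasher>(&signature, msg, &pair.public()));

    // The generic `Pair::verify` hashes the ECDSA half with Blake2, so it rejects this signature.
    assert!(!Pair::verify(&signature, msg, &pair.public()));
}
```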
@@ -455,12 +515,12 @@ where #[cfg(all(test, feature = "bls-experimental"))] mod test { use super::*; - use crate::crypto::DEV_PHRASE; + use crate::{crypto::DEV_PHRASE, KeccakHasher}; use ecdsa_bls377::{Pair, Signature}; use crate::{bls377, ecdsa}; - #[test] + #[test] fn test_length_of_paired_ecdsa_and_bls377_public_key_and_signature_is_correct() { assert_eq!( ::Public::LEN, @@ -617,6 +677,16 @@ mod test { assert_eq!(cmp, public); } + #[test] + fn sign_and_verify_with_hasher_works() { + let pair = + Pair::from_seed(&(b"12345678901234567890123456789012".as_slice().try_into().unwrap())); + let message = b"Something important"; + let signature = pair.sign_with_hasher::(&message[..]); + + assert!(Pair::verify_with_hasher::(&signature, &message[..], &pair.public())); + } + #[test] fn signature_serialization_works() { let pair = -- GitLab From ae1bdcfb91a26c5f65c5ca534aa8a04523ca2277 Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Tue, 14 Nov 2023 10:43:40 +0100 Subject: [PATCH 17/74] Fix `expect_pallet` benchmarks not relaying on hard-coded `frame_system` dependency version (#2288) ## Problem/Motivation The benchmark for the `ExpectPallet` XCM instruction uses a hard-coded version `4.0.0` for the `frame_system` pallet. Unfortunately, this doesn't work for the `polkadot-fellows/runtimes` repository, where we use dependencies from `crates.io`, e.g., [frame-system::23.0.0.0](https://github.com/polkadot-fellows/runtimes/blob/dd7f86f0d50064481ed0b7c0218494a5cfad997e/relay/kusama/Cargo.toml#L83). Closes: https://github.com/paritytech/polkadot-sdk/issues/2284 ## Solution This PR fixes the benchmarks that require pallet information and enables the runtime to provide the correct/custom pallet information. The default implementation provides `frame_system::Pallet` with index `0`, where the version is not hard-coded but read from the runtime. 
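For runtimes where `frame_system` does not sit at index `0`, or that want to point these benchmarks at another pallet, the new hook can be overridden. The helper below is a minimal sketch (not part of the patch) of what such an override could delegate to; it assumes only the `PalletInfoAccess`/`PalletInfoData` items from `frame-support` that the default implementation added later in this patch already uses.

```rust
// Sketch of a helper a runtime's `valid_pallet()` override could call; it generalises
// the default implementation in this patch to any pallet implementing `PalletInfoAccess`.
use frame_support::traits::{PalletInfoAccess, PalletInfoData};

fn pallet_info_of<P: PalletInfoAccess>() -> PalletInfoData {
    PalletInfoData {
        index: P::index(),
        name: P::name(),
        module_name: P::module_name(),
        crate_version: P::crate_version(),
    }
}
```

A runtime could then, for example, return `pallet_info_of::<Balances>()` (pallet name purely illustrative) from its `valid_pallet()` implementation, so that `ExpectPallet`/`QueryPallet` are benchmarked against the pallet that actually exists at the expected index.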
## Local testing Added log for `T::valid_pallet` to the benchmarks like: ``` let valid_pallet = T::valid_pallet(); log::info!( target: "frame::benchmark::pallet", "valid_pallet: {}::{}::{}::{}::{}", valid_pallet.index, valid_pallet.module_name, valid_pallet.crate_version.major, valid_pallet.crate_version.minor, valid_pallet.crate_version.patch, ); ``` Run benchmarks for `westend`: ``` cargo run --bin=polkadot --features=runtime-benchmarks -- benchmark pallet --steps=2 --repeat=1 --extrinsic=* --heap-pages=4096 --json-file=./bench.json --chain=westend-dev --template=./polkadot/xcm/pallet-xcm-benchmarks/template.hbs --pallet=pallet_xcm_benchmarks::generic --output=./polkadot/runtime/westend/src/weights/xcm ``` --- For actual `frame_system` version: ``` [package] name = "frame-system" version = "4.0.0-dev" ``` Log dump: ``` 2023-11-13 12:56:45 Starting benchmark: pallet_xcm_benchmarks::generic::query_pallet 2023-11-13 12:56:45 valid_pallet: 0::frame_system::4::0::0 2023-11-13 12:56:45 valid_pallet: 0::frame_system::4::0::0 2023-11-13 12:56:45 valid_pallet: 0::frame_system::4::0::0 2023-11-13 12:56:45 Starting benchmark: pallet_xcm_benchmarks::generic::expect_pallet 2023-11-13 12:56:45 valid_pallet: 0::frame_system::4::0::0 2023-11-13 12:56:45 valid_pallet: 0::frame_system::4::0::0 2023-11-13 12:56:45 valid_pallet: 0::frame_system::4::0::0 ``` For changed `frame_system` version: ``` [package] name = "frame-system" version = "5.1.3-dev" ``` Log dump: ``` 2023-11-13 12:51:51 Starting benchmark: pallet_xcm_benchmarks::generic::query_pallet 2023-11-13 12:51:51 valid_pallet: 0::frame_system::5::1::3 2023-11-13 12:51:51 valid_pallet: 0::frame_system::5::1::3 2023-11-13 12:51:51 valid_pallet: 0::frame_system::5::1::3 2023-11-13 12:51:51 Starting benchmark: pallet_xcm_benchmarks::generic::expect_pallet 2023-11-13 12:51:51 valid_pallet: 0::frame_system::5::1::3 2023-11-13 12:51:51 valid_pallet: 0::frame_system::5::1::3 2023-11-13 12:51:51 valid_pallet: 0::frame_system::5::1::3 ``` ## References Closes: https://github.com/paritytech/polkadot-sdk/issues/2284 --- .../src/generic/benchmarking.rs | 15 ++++++++------- .../xcm/pallet-xcm-benchmarks/src/generic/mod.rs | 12 ++++++++++++ 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs index 4a997666027..f1c48ba9b83 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs @@ -413,8 +413,9 @@ benchmarks! { executor.set_holding(expected_assets_in_holding.into()); } + let valid_pallet = T::valid_pallet(); let instruction = Instruction::QueryPallet { - module_name: b"frame_system".to_vec(), + module_name: valid_pallet.module_name.as_bytes().to_vec(), response_info: QueryResponseInfo { destination, query_id, max_weight }, }; let xcm = Xcm(vec![instruction]); @@ -428,13 +429,13 @@ benchmarks! 
{ expect_pallet { let mut executor = new_executor::(Default::default()); - + let valid_pallet = T::valid_pallet(); let instruction = Instruction::ExpectPallet { - index: 0, - name: b"System".to_vec(), - module_name: b"frame_system".to_vec(), - crate_major: 4, - min_crate_minor: 0, + index: valid_pallet.index as u32, + name: valid_pallet.name.as_bytes().to_vec(), + module_name: valid_pallet.module_name.as_bytes().to_vec(), + crate_major: valid_pallet.crate_version.major.into(), + min_crate_minor: valid_pallet.crate_version.minor.into(), }; let xcm = Xcm(vec![instruction]); }: { diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mod.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mod.rs index cbdfa8d0112..11f7bba19a9 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mod.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mod.rs @@ -91,6 +91,18 @@ pub mod pallet { /// /// If set to `Err`, benchmarks which rely on a universal alias will be skipped. fn alias_origin() -> Result<(MultiLocation, MultiLocation), BenchmarkError>; + + /// Returns a valid pallet info for `ExpectPallet` or `QueryPallet` benchmark. + /// + /// By default returns `frame_system::Pallet` info with expected pallet index `0`. + fn valid_pallet() -> frame_support::traits::PalletInfoData { + frame_support::traits::PalletInfoData { + index: as frame_support::traits::PalletInfoAccess>::index(), + name: as frame_support::traits::PalletInfoAccess>::name(), + module_name: as frame_support::traits::PalletInfoAccess>::module_name(), + crate_version: as frame_support::traits::PalletInfoAccess>::crate_version(), + } + } } #[pallet::pallet] -- GitLab From 54ca4f131b82f45b0c2d1f316d65d7c97ad9a99b Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Tue, 14 Nov 2023 15:01:15 +0400 Subject: [PATCH 18/74] Delete undecodable Westend Asset Hub `Balances::Hold` and `Nfts::ItemMetadataOf` (#2309) Closes https://github.com/paritytech/polkadot-sdk/issues/2241 See issue comments for more details about this storage. --- .../assets/asset-hub-westend/src/lib.rs | 78 ++++++++++++++++++- 1 file changed, 75 insertions(+), 3 deletions(-) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index d88aa2607e2..cd17b9d86f7 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -54,7 +54,7 @@ use frame_system::{ EnsureRoot, EnsureSigned, EnsureSignedBy, }; use pallet_asset_conversion_tx_payment::AssetConversionAdapter; -use pallet_nfts::PalletFeatures; +use pallet_nfts::{DestroyWitness, PalletFeatures}; use pallet_xcm::EnsureXcm; pub use parachains_common as common; use parachains_common::{ @@ -69,7 +69,9 @@ use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdConversion, AccountIdLookup, BlakeTwo256, Block as BlockT, Verify}, + traits::{ + AccountIdConversion, AccountIdLookup, BlakeTwo256, Block as BlockT, Saturating, Verify, + }, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, Perbill, Permill, RuntimeDebug, }; @@ -944,8 +946,79 @@ pub type Migrations = ( pallet_multisig::migrations::v1::MigrateToV1, // unreleased InitStorageVersions, + // unreleased + DeleteUndecodableStorage, ); +/// Asset Hub Westend has some undecodable storage, delete it. +/// See for more info. 
+/// +/// First we remove the bad Hold, then the bad NFT collection. +pub struct DeleteUndecodableStorage; + +impl frame_support::traits::OnRuntimeUpgrade for DeleteUndecodableStorage { + fn on_runtime_upgrade() -> Weight { + use sp_core::crypto::Ss58Codec; + + let mut writes = 0; + + // Remove Holds for account with undecodable hold + // Westend doesn't have any HoldReasons implemented yet, so it's safe to just blanket remove + // any for this account. + match AccountId::from_ss58check("5GCCJthVSwNXRpbeg44gysJUx9vzjdGdfWhioeM7gCg6VyXf") { + Ok(a) => { + log::info!("Removing holds for account with bad hold"); + pallet_balances::Holds::::remove(a); + writes.saturating_inc(); + }, + Err(_) => { + log::error!("CleanupUndecodableStorage: Somehow failed to convert valid SS58 address into an AccountId!"); + }, + }; + + // Destroy undecodable NFT item 1 + writes.saturating_inc(); + match pallet_nfts::Pallet::::do_burn(3, 1, |_| Ok(())) { + Ok(_) => { + log::info!("Destroyed undecodable NFT item 1"); + }, + Err(e) => { + log::error!("Failed to destroy undecodable NFT item: {:?}", e); + return ::DbWeight::get().reads_writes(0, writes) + }, + } + + // Destroy undecodable NFT item 2 + writes.saturating_inc(); + match pallet_nfts::Pallet::::do_burn(3, 2, |_| Ok(())) { + Ok(_) => { + log::info!("Destroyed undecodable NFT item 2"); + }, + Err(e) => { + log::error!("Failed to destroy undecodable NFT item: {:?}", e); + return ::DbWeight::get().reads_writes(0, writes) + }, + } + + // Finally, we can destroy the collection + writes.saturating_inc(); + match pallet_nfts::Pallet::::do_destroy_collection( + 3, + DestroyWitness { attributes: 0, item_metadatas: 1, item_configs: 0 }, + None, + ) { + Ok(_) => { + log::info!("Destroyed undecodable NFT collection"); + }, + Err(e) => { + log::error!("Failed to destroy undecodable NFT collection: {:?}", e); + }, + }; + + ::DbWeight::get().reads_writes(0, writes) + } +} + /// Migration to initialize storage versions for pallets added after genesis. /// /// Ideally this would be done automatically (see @@ -957,7 +1030,6 @@ pub struct InitStorageVersions; impl frame_support::traits::OnRuntimeUpgrade for InitStorageVersions { fn on_runtime_upgrade() -> Weight { use frame_support::traits::{GetStorageVersion, StorageVersion}; - use sp_runtime::traits::Saturating; let mut writes = 0; -- GitLab From a393cfcb28282cd2147134b8968fba3b41681ea9 Mon Sep 17 00:00:00 2001 From: Hugo Trentesaux Date: Tue, 14 Nov 2023 12:57:39 +0100 Subject: [PATCH 19/74] Add details in `--dev` cli flag documentation (#2305) add details in `--dev` flag to tell that it disables local peer discovery ### Context When adding automated end-to-end tests, we replaced `--dev` by ``` `--chain=dev`, `--force-authoring`, `--rpc-cors=all`, `--alice`, and `--tmp` flags ``` as stated in the command line documentation. But the tests started failing due to the nodes connecting to each other. ### Fix This PR includes additional command line documentation to explain more in detail what `--dev` flag inludes. 
--- substrate/client/cli/src/params/shared_params.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/substrate/client/cli/src/params/shared_params.rs b/substrate/client/cli/src/params/shared_params.rs index 6419e15c62a..465372fba17 100644 --- a/substrate/client/cli/src/params/shared_params.rs +++ b/substrate/client/cli/src/params/shared_params.rs @@ -35,6 +35,7 @@ pub struct SharedParams { /// /// This flag sets `--chain=dev`, `--force-authoring`, `--rpc-cors=all`, /// `--alice`, and `--tmp` flags, unless explicitly overridden. + /// It also disables local peer discovery (see --no-mdns and --discover-local) #[arg(long, conflicts_with_all = &["chain"])] pub dev: bool, -- GitLab From 39cc95740aa03f53a0e8bfb8537f0ddb5fa3e19e Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Tue, 14 Nov 2023 14:47:04 +0200 Subject: [PATCH 20/74] xcm-emulator: add Rococo<>Westend bridge and add tests for assets transfers over the bridge (#2251) - switch from Rococo<>Wococo to Rococo<>Westend bridge - add bidirectional simple tests - remove Wococo chains from xcm-emulator - added tests for assets transfers over Rococo<>Westend bridge fixes https://github.com/paritytech/parity-bridges-common/issues/2405 --- Cargo.lock | 78 +------ Cargo.toml | 5 +- .../assets/asset-hub-rococo/src/lib.rs | 4 +- .../assets/asset-hub-westend/src/lib.rs | 4 +- .../assets/asset-hub-wococo/Cargo.toml | 26 --- .../assets/asset-hub-wococo/src/lib.rs | 53 ----- .../bridges/bridge-hub-rococo/src/lib.rs | 2 +- .../bridges/bridge-hub-westend/src/genesis.rs | 2 +- .../bridges/bridge-hub-westend/src/lib.rs | 2 +- .../bridges/bridge-hub-wococo/Cargo.toml | 25 -- .../bridges/bridge-hub-wococo/src/lib.rs | 47 ---- .../parachains/testing/penpal/src/lib.rs | 2 +- .../emulated/chains/relays/wococo/Cargo.toml | 30 --- .../emulated/chains/relays/wococo/src/lib.rs | 46 ---- .../emulated/common/src/impls.rs | 151 ++++++++++-- .../Cargo.toml | 10 +- .../src/lib.rs | 60 ++--- .../networks/wococo-system/Cargo.toml | 16 -- .../networks/wococo-system/src/lib.rs | 50 ---- .../bridges/bridge-hub-rococo/Cargo.toml | 6 +- .../bridges/bridge-hub-rococo/src/lib.rs | 23 +- .../src/tests/asset_transfers.rs | 219 ++++++++++++++++++ .../bridge-hub-rococo/src/tests/mod.rs | 3 +- .../src/tests/{example.rs => send_xcm.rs} | 28 +-- .../bridges/bridge-hub-westend/Cargo.toml | 6 +- .../bridges/bridge-hub-westend/src/lib.rs | 27 ++- .../src/tests/asset_transfers.rs | 218 +++++++++++++++++ .../bridge-hub-westend/src/tests/mod.rs | 3 +- .../src/tests/{example.rs => send_xcm.rs} | 30 ++- .../bridge-hub-westend/src/tests/teleport.rs | 2 +- 30 files changed, 710 insertions(+), 468 deletions(-) delete mode 100644 cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/Cargo.toml delete mode 100644 cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/src/lib.rs delete mode 100644 cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/Cargo.toml delete mode 100644 cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/src/lib.rs delete mode 100644 cumulus/parachains/integration-tests/emulated/chains/relays/wococo/Cargo.toml delete mode 100644 cumulus/parachains/integration-tests/emulated/chains/relays/wococo/src/lib.rs rename cumulus/parachains/integration-tests/emulated/networks/{rococo-wococo-system => rococo-westend-system}/Cargo.toml (57%) rename cumulus/parachains/integration-tests/emulated/networks/{rococo-wococo-system => 
rococo-westend-system}/src/lib.rs (57%) delete mode 100644 cumulus/parachains/integration-tests/emulated/networks/wococo-system/Cargo.toml delete mode 100644 cumulus/parachains/integration-tests/emulated/networks/wococo-system/src/lib.rs create mode 100644 cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs rename cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/{example.rs => send_xcm.rs} (77%) create mode 100644 cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs rename cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/{example.rs => send_xcm.rs} (71%) diff --git a/Cargo.lock b/Cargo.lock index 4a45d5c602e..32d9099b386 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1119,22 +1119,6 @@ dependencies = [ "westend-runtime-constants", ] -[[package]] -name = "asset-hub-wococo-emulated-chain" -version = "0.0.0" -dependencies = [ - "asset-hub-rococo-emulated-chain", - "asset-hub-rococo-runtime", - "cumulus-primitives-core", - "emulated-integration-tests-common", - "frame-support", - "parachains-common", - "serde_json", - "sp-core", - "sp-runtime", - "wococo-emulated-chain", -] - [[package]] name = "asset-test-utils" version = "1.0.0" @@ -2202,12 +2186,14 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "emulated-integration-tests-common", "frame-support", + "pallet-assets", + "pallet-balances", "pallet-bridge-messages", "pallet-message-queue", "pallet-xcm", "parachains-common", "parity-scale-codec", - "rococo-wococo-system-emulated-network", + "rococo-westend-system-emulated-network", "staging-xcm", "staging-xcm-executor", ] @@ -2370,14 +2356,16 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "emulated-integration-tests-common", "frame-support", + "pallet-assets", + "pallet-balances", "pallet-bridge-messages", "pallet-message-queue", "pallet-xcm", "parachains-common", "parity-scale-codec", + "rococo-westend-system-emulated-network", "staging-xcm", "staging-xcm-executor", - "westend-system-emulated-network", ] [[package]] @@ -2463,21 +2451,6 @@ dependencies = [ "westend-runtime-constants", ] -[[package]] -name = "bridge-hub-wococo-emulated-chain" -version = "0.0.0" -dependencies = [ - "bridge-hub-rococo-emulated-chain", - "bridge-hub-rococo-runtime", - "cumulus-primitives-core", - "emulated-integration-tests-common", - "frame-support", - "parachains-common", - "serde_json", - "sp-core", - "sp-runtime", -] - [[package]] name = "bridge-runtime-common" version = "0.1.0" @@ -14578,16 +14551,16 @@ dependencies = [ ] [[package]] -name = "rococo-wococo-system-emulated-network" +name = "rococo-westend-system-emulated-network" version = "0.0.0" dependencies = [ "asset-hub-rococo-emulated-chain", - "asset-hub-wococo-emulated-chain", + "asset-hub-westend-emulated-chain", "bridge-hub-rococo-emulated-chain", - "bridge-hub-wococo-emulated-chain", + "bridge-hub-westend-emulated-chain", "emulated-integration-tests-common", "rococo-emulated-chain", - "wococo-emulated-chain", + "westend-emulated-chain", ] [[package]] @@ -21236,37 +21209,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "wococo-emulated-chain" -version = "0.0.0" -dependencies = [ - "emulated-integration-tests-common", - "pallet-im-online", - "parachains-common", - "polkadot-primitives", - "rococo-emulated-chain", - "rococo-runtime", - "rococo-runtime-constants", - "sc-consensus-grandpa", - "serde_json", - "sp-authority-discovery", - 
"sp-consensus-babe", - "sp-consensus-beefy", - "sp-core", - "sp-runtime", -] - -[[package]] -name = "wococo-system-emulated-network" -version = "0.0.0" -dependencies = [ - "asset-hub-wococo-emulated-chain", - "bridge-hub-wococo-emulated-chain", - "emulated-integration-tests-common", - "penpal-emulated-chain", - "wococo-emulated-chain", -] - [[package]] name = "wyz" version = "0.5.1" diff --git a/Cargo.toml b/Cargo.toml index 30445bd5945..15a7d5c35bf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -71,16 +71,13 @@ members = [ "cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend", "cumulus/parachains/integration-tests/emulated/common", "cumulus/parachains/integration-tests/emulated/chains/relays/rococo", - "cumulus/parachains/integration-tests/emulated/chains/relays/wococo", "cumulus/parachains/integration-tests/emulated/chains/relays/westend", "cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo", - "cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo", "cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend", "cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo", "cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend", "cumulus/parachains/integration-tests/emulated/networks/rococo-system", - "cumulus/parachains/integration-tests/emulated/networks/wococo-system", - "cumulus/parachains/integration-tests/emulated/networks/rococo-wococo-system", + "cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system", "cumulus/parachains/integration-tests/emulated/networks/westend-system", "cumulus/parachains/pallets/collective-content", "cumulus/parachains/pallets/parachain-info", diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs index f94c4c3d255..1ed22b3cc4f 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs @@ -21,7 +21,8 @@ use frame_support::traits::OnInitialize; // Cumulus use emulated_integration_tests_common::{ impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, - impl_assets_helpers_for_parachain, xcm_emulator::decl_test_parachains, + impl_assets_helpers_for_parachain, impl_foreign_assets_helpers_for_parachain, impls::Parachain, + xcm_emulator::decl_test_parachains, }; use rococo_emulated_chain::Rococo; @@ -53,3 +54,4 @@ decl_test_parachains! 
{ impl_accounts_helpers_for_parachain!(AssetHubRococo); impl_assert_events_helpers_for_parachain!(AssetHubRococo, false); impl_assets_helpers_for_parachain!(AssetHubRococo, Rococo); +impl_foreign_assets_helpers_for_parachain!(AssetHubRococo, Rococo); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs index 73d777247a5..4dcdb613ac2 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs @@ -21,7 +21,8 @@ use frame_support::traits::OnInitialize; // Cumulus use emulated_integration_tests_common::{ impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, - impl_assets_helpers_for_parachain, xcm_emulator::decl_test_parachains, + impl_assets_helpers_for_parachain, impl_foreign_assets_helpers_for_parachain, impls::Parachain, + xcm_emulator::decl_test_parachains, }; use westend_emulated_chain::Westend; @@ -53,3 +54,4 @@ decl_test_parachains! { impl_accounts_helpers_for_parachain!(AssetHubWestend); impl_assert_events_helpers_for_parachain!(AssetHubWestend, false); impl_assets_helpers_for_parachain!(AssetHubWestend, Westend); +impl_foreign_assets_helpers_for_parachain!(AssetHubWestend, Westend); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/Cargo.toml deleted file mode 100644 index 0f212c15999..00000000000 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/Cargo.toml +++ /dev/null @@ -1,26 +0,0 @@ -[package] -name = "asset-hub-wococo-emulated-chain" -version = "0.0.0" -authors.workspace = true -edition.workspace = true -license = "Apache-2.0" -description = "Asset Hub Wococo emulated chain" -publish = false - -[dependencies] -serde_json = "1.0.104" - -# Substrate -sp-core = { path = "../../../../../../../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../../../../../../../substrate/primitives/runtime", default-features = false } -frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } - -# Polakadot -parachains-common = { path = "../../../../../../../parachains/common" } - -# Cumulus -cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } -emulated-integration-tests-common = { path = "../../../../common", default-features = false } -asset-hub-rococo-runtime = { path = "../../../../../../runtimes/assets/asset-hub-rococo" } -wococo-emulated-chain = { path = "../../../relays/wococo" } -asset-hub-rococo-emulated-chain = { path = "../asset-hub-rococo" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/src/lib.rs deleted file mode 100644 index 38a6ece3472..00000000000 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/src/lib.rs +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Substrate -use frame_support::traits::OnInitialize; - -// Cumulus -use emulated_integration_tests_common::{ - impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, - impl_assets_helpers_for_parachain, xcm_emulator::decl_test_parachains, -}; -use wococo_emulated_chain::Wococo; - -// AssetHubWococo Parachain declaration -decl_test_parachains! { - pub struct AssetHubWococo { - genesis = asset_hub_rococo_emulated_chain::genesis::genesis(), - on_init = { - asset_hub_rococo_runtime::AuraExt::on_initialize(1); - }, - runtime = asset_hub_rococo_runtime, - core = { - XcmpMessageHandler: asset_hub_rococo_runtime::XcmpQueue, - LocationToAccountId: asset_hub_rococo_runtime::xcm_config::LocationToAccountId, - ParachainInfo: asset_hub_rococo_runtime::ParachainInfo, - }, - pallets = { - PolkadotXcm: asset_hub_rococo_runtime::PolkadotXcm, - Assets: asset_hub_rococo_runtime::Assets, - ForeignAssets: asset_hub_rococo_runtime::ForeignAssets, - PoolAssets: asset_hub_rococo_runtime::PoolAssets, - AssetConversion: asset_hub_rococo_runtime::AssetConversion, - Balances: asset_hub_rococo_runtime::Balances, - } - }, -} - -// AssetHubWococo implementation -impl_accounts_helpers_for_parachain!(AssetHubWococo); -impl_assert_events_helpers_for_parachain!(AssetHubWococo, false); -impl_assets_helpers_for_parachain!(AssetHubWococo, Wococo); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs index f4557021f62..ea0c9513abc 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs @@ -21,7 +21,7 @@ use frame_support::traits::OnInitialize; // Cumulus use emulated_integration_tests_common::{ impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, - xcm_emulator::decl_test_parachains, + impls::Parachain, xcm_emulator::decl_test_parachains, }; // BridgeHubRococo Parachain declaration diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/genesis.rs index cd578d6862f..2eb7e0ddbd2 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/genesis.rs @@ -22,7 +22,7 @@ use emulated_integration_tests_common::{ }; use parachains_common::Balance; -pub const PARA_ID: u32 = 1013; +pub const PARA_ID: u32 = 1002; pub const ED: Balance = parachains_common::westend::currency::EXISTENTIAL_DEPOSIT; pub fn genesis() -> Storage { diff --git 
a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs index 1f1126d4565..4a130ac1f27 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs @@ -21,7 +21,7 @@ use frame_support::traits::OnInitialize; // Cumulus use emulated_integration_tests_common::{ impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, - xcm_emulator::decl_test_parachains, + impls::Parachain, xcm_emulator::decl_test_parachains, }; // BridgeHubWestend Parachain declaration diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/Cargo.toml deleted file mode 100644 index 0b02730a50c..00000000000 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "bridge-hub-wococo-emulated-chain" -version = "0.0.0" -authors.workspace = true -edition.workspace = true -license = "Apache-2.0" -description = "Bridge Hub Wococo emulated chain" -publish = false - -[dependencies] -serde_json = "1.0.104" - -# Substrate -sp-core = { path = "../../../../../../../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../../../../../../../substrate/primitives/runtime", default-features = false } -frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } - -# Polakadot -parachains-common = { path = "../../../../../../../parachains/common" } - -# Cumulus -cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } -emulated-integration-tests-common = { path = "../../../../common", default-features = false } -bridge-hub-rococo-runtime = { path = "../../../../../../runtimes/bridge-hubs/bridge-hub-rococo" } -bridge-hub-rococo-emulated-chain = { path = "../bridge-hub-rococo" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/src/lib.rs deleted file mode 100644 index e643f104aa3..00000000000 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/src/lib.rs +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Substrate -use frame_support::traits::OnInitialize; - -// Cumulus -use emulated_integration_tests_common::{ - impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, - xcm_emulator::decl_test_parachains, -}; - -// BridgeHubWococo Parachain declaration -decl_test_parachains! { - pub struct BridgeHubWococo { - genesis = bridge_hub_rococo_emulated_chain::genesis::genesis(), - on_init = { - bridge_hub_rococo_runtime::AuraExt::on_initialize(1); - }, - runtime = bridge_hub_rococo_runtime, - core = { - XcmpMessageHandler: bridge_hub_rococo_runtime::XcmpQueue, - LocationToAccountId: bridge_hub_rococo_runtime::xcm_config::LocationToAccountId, - ParachainInfo: bridge_hub_rococo_runtime::ParachainInfo, - }, - pallets = { - PolkadotXcm: bridge_hub_rococo_runtime::PolkadotXcm, - Balances: bridge_hub_rococo_runtime::Balances, - } - }, -} - -// BridgeHubWococo implementation -impl_accounts_helpers_for_parachain!(BridgeHubWococo); -impl_assert_events_helpers_for_parachain!(BridgeHubWococo, false); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs index 537f96f45b4..f9a422bfcba 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs @@ -22,7 +22,7 @@ use frame_support::traits::OnInitialize; // Cumulus use emulated_integration_tests_common::{ impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, - impl_assets_helpers_for_parachain, xcm_emulator::decl_test_parachains, + impl_assets_helpers_for_parachain, impls::Parachain, xcm_emulator::decl_test_parachains, }; use rococo_emulated_chain::Rococo; diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/wococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/relays/wococo/Cargo.toml deleted file mode 100644 index 51a87954b8c..00000000000 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/wococo/Cargo.toml +++ /dev/null @@ -1,30 +0,0 @@ -[package] -name = "wococo-emulated-chain" -version = "0.0.0" -authors.workspace = true -edition.workspace = true -license = "Apache-2.0" -description = "Wococo emulated chain" -publish = false - -[dependencies] -serde_json = "1.0.104" - -# Substrate -sp-core = { path = "../../../../../../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } -sp-authority-discovery = { path = "../../../../../../../substrate/primitives/authority-discovery", default-features = false } -sp-consensus-babe = { path = "../../../../../../../substrate/primitives/consensus/babe", default-features = false } -beefy-primitives = { package = "sp-consensus-beefy", path = "../../../../../../../substrate/primitives/consensus/beefy" } -grandpa = { package = "sc-consensus-grandpa", path = "../../../../../../../substrate/client/consensus/grandpa", default-features = false } -pallet-im-online = { path = "../../../../../../../substrate/frame/im-online", default-features = false } - -# Polkadot -polkadot-primitives = { path = "../../../../../../../polkadot/primitives", default-features = false } -rococo-runtime-constants = { path = "../../../../../../../polkadot/runtime/rococo/constants", default-features = false } -rococo-runtime = { path = 
"../../../../../../../polkadot/runtime/rococo" } - -# Cumulus -parachains-common = { path = "../../../../../../parachains/common" } -emulated-integration-tests-common = { path = "../../../common", default-features = false } -rococo-emulated-chain = { path = "../rococo" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/wococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/wococo/src/lib.rs deleted file mode 100644 index a04deee330f..00000000000 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/wococo/src/lib.rs +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Cumulus -use emulated_integration_tests_common::{ - impl_accounts_helpers_for_relay_chain, impl_assert_events_helpers_for_relay_chain, - impl_hrmp_channels_helpers_for_relay_chain, impl_send_transact_helpers_for_relay_chain, - xcm_emulator::decl_test_relay_chains, -}; - -// Wococo declaration -decl_test_relay_chains! { - #[api_version(8)] - pub struct Wococo { - genesis = rococo_emulated_chain::genesis::genesis(), - on_init = (), - runtime = rococo_runtime, - core = { - SovereignAccountOf: rococo_runtime::xcm_config::LocationConverter, - }, - pallets = { - XcmPallet: rococo_runtime::XcmPallet, - Sudo: rococo_runtime::Sudo, - Balances: rococo_runtime::Balances, - Hrmp: rococo_runtime::Hrmp, - } - }, -} - -// Wococo implementation -impl_accounts_helpers_for_relay_chain!(Wococo); -impl_assert_events_helpers_for_relay_chain!(Wococo); -impl_hrmp_channels_helpers_for_relay_chain!(Wococo); -impl_send_transact_helpers_for_relay_chain!(Wococo); diff --git a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs index 82f27b93200..8c94df6d888 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs @@ -47,7 +47,8 @@ pub use xcm::{ pub use cumulus_pallet_parachain_system; pub use cumulus_pallet_xcmp_queue; pub use cumulus_primitives_core::{ - relay_chain::HrmpChannelId, DmpMessageHandler, ParaId, XcmpMessageHandler, + relay_chain::HrmpChannelId, DmpMessageHandler, Junction, Junctions, NetworkId, ParaId, + XcmpMessageHandler, }; pub use parachains_common::{AccountId, Balance}; pub use xcm_emulator::{ @@ -62,11 +63,14 @@ use bp_messages::{ LaneId, MessageKey, OutboundLaneData, }; use bridge_runtime_common::messages_xcm_extension::XcmBlobMessageDispatchResult; -pub use pallet_bridge_messages::Instance2 as BridgeMessagesInstance2; -use pallet_bridge_messages::{Config, Instance1, OutboundLanes, Pallet}; +use pallet_bridge_messages::{Config, OutboundLanes, Pallet}; +pub use pallet_bridge_messages::{ + Instance1 as BridgeMessagesInstance1, Instance2 as BridgeMessagesInstance2, + Instance3 as BridgeMessagesInstance3, +}; -pub struct BridgeHubMessageHandler { - _marker: 
std::marker::PhantomData<(S, T, I)>, +pub struct BridgeHubMessageHandler { + _marker: std::marker::PhantomData<(S, SI, T, TI)>, } struct LaneIdWrapper(LaneId); @@ -83,13 +87,14 @@ impl From for LaneIdWrapper { } } -impl BridgeMessageHandler for BridgeHubMessageHandler +impl BridgeMessageHandler for BridgeHubMessageHandler where - S: Config, - T: Config, - I: 'static, - >::InboundPayload: From>, - >::MessageDispatch: + S: Config, + SI: 'static, + T: Config, + TI: 'static, + >::InboundPayload: From>, + >::MessageDispatch: MessageDispatch, { fn get_source_outbound_messages() -> Vec { @@ -100,16 +105,13 @@ where // collect messages from `OutboundMessages` for each active outbound lane in the source for lane in active_lanes { - let latest_generated_nonce = - OutboundLanes::::get(lane).latest_generated_nonce; - let latest_received_nonce = - OutboundLanes::::get(lane).latest_received_nonce; + let latest_generated_nonce = OutboundLanes::::get(lane).latest_generated_nonce; + let latest_received_nonce = OutboundLanes::::get(lane).latest_received_nonce; (latest_received_nonce + 1..=latest_generated_nonce).for_each(|nonce| { - let encoded_payload: Vec = - Pallet::::outbound_message_data(*lane, nonce) - .expect("Bridge message does not exist") - .into(); + let encoded_payload: Vec = Pallet::::outbound_message_data(*lane, nonce) + .expect("Bridge message does not exist") + .into(); let payload = Vec::::decode(&mut &encoded_payload[..]) .expect("Decodign XCM message failed"); let id: u32 = LaneIdWrapper(*lane).into(); @@ -133,9 +135,9 @@ where // Directly dispatch outbound messages assuming everything is correct // and bypassing the `Relayers` and `InboundLane` logic - let dispatch_result = TargetMessageDispatch::::dispatch(DispatchMessage { + let dispatch_result = TargetMessageDispatch::::dispatch(DispatchMessage { key: MessageKey { lane_id, nonce }, - data: DispatchMessageData::> { payload }, + data: DispatchMessageData::> { payload }, }); let result = match dispatch_result.dispatch_level_result { @@ -151,14 +153,14 @@ where } fn notify_source_message_delivery(lane_id: u32) { - let data = OutboundLanes::::get(LaneIdWrapper::from(lane_id).0); + let data = OutboundLanes::::get(LaneIdWrapper::from(lane_id).0); let new_data = OutboundLaneData { oldest_unpruned_nonce: data.oldest_unpruned_nonce + 1, latest_received_nonce: data.latest_received_nonce + 1, ..data }; - OutboundLanes::::insert(LaneIdWrapper::from(lane_id).0, new_data); + OutboundLanes::::insert(LaneIdWrapper::from(lane_id).0, new_data); } } @@ -392,6 +394,23 @@ macro_rules! impl_accounts_helpers_for_parachain { } }); } + + /// Return local sovereign account of `para_id` on other `network_id` + pub fn sovereign_account_of_parachain_on_other_global_consensus( + network_id: $crate::impls::NetworkId, + para_id: $crate::impls::ParaId, + ) -> $crate::impls::AccountId { + let remote_location = $crate::impls::MultiLocation { + parents: 2, + interior: $crate::impls::Junctions::X2( + $crate::impls::Junction::GlobalConsensus(network_id), + $crate::impls::Junction::Parachain(para_id.into()), + ), + }; + ::execute_with(|| { + Self::sovereign_account_id_of(remote_location) + }) + } } } }; @@ -614,7 +633,9 @@ macro_rules! 
impl_assets_helpers_for_parachain { $crate::impls::assert_expected_events!( Self, vec![ - RuntimeEvent::::Assets($crate::impls::pallet_assets::Event::Issued { asset_id, owner, amount }) => { + RuntimeEvent::::Assets( + $crate::impls::pallet_assets::Event::Issued { asset_id, owner, amount } + ) => { asset_id: *asset_id == id, owner: *owner == beneficiary.clone().into(), amount: *amount == amount_to_mint, @@ -687,3 +708,85 @@ macro_rules! impl_assets_helpers_for_parachain { } }; } + +#[macro_export] +macro_rules! impl_foreign_assets_helpers_for_parachain { + ( $chain:ident, $relay_chain:ident ) => { + $crate::impls::paste::paste! { + impl $chain { + /// Create foreign assets using sudo `ForeignAssets::force_create()` + pub fn force_create_foreign_asset( + id: $crate::impls::MultiLocation, + owner: $crate::impls::AccountId, + is_sufficient: bool, + min_balance: u128, + prefund_accounts: Vec<($crate::impls::AccountId, u128)>, + ) { + use $crate::impls::Inspect; + let sudo_origin = <$chain as $crate::impls::Chain>::RuntimeOrigin::root(); + ::execute_with(|| { + $crate::impls::assert_ok!( + ]>::ForeignAssets::force_create( + sudo_origin, + id, + owner.clone().into(), + is_sufficient, + min_balance, + ) + ); + assert!(]>::ForeignAssets::asset_exists(id)); + type RuntimeEvent = <$chain as $crate::impls::Chain>::RuntimeEvent; + $crate::impls::assert_expected_events!( + Self, + vec![ + RuntimeEvent::::ForeignAssets( + $crate::impls::pallet_assets::Event::ForceCreated { + asset_id, + .. + } + ) => { asset_id: *asset_id == id, }, + ] + ); + }); + for (beneficiary, amount) in prefund_accounts.into_iter() { + let signed_origin = + <$chain as $crate::impls::Chain>::RuntimeOrigin::signed(owner.clone()); + Self::mint_foreign_asset(signed_origin, id, beneficiary, amount); + } + } + + /// Mint assets making use of the ForeignAssets pallet-assets instance + pub fn mint_foreign_asset( + signed_origin: ::RuntimeOrigin, + id: $crate::impls::MultiLocation, + beneficiary: $crate::impls::AccountId, + amount_to_mint: u128, + ) { + ::execute_with(|| { + $crate::impls::assert_ok!(]>::ForeignAssets::mint( + signed_origin, + id.into(), + beneficiary.clone().into(), + amount_to_mint + )); + + type RuntimeEvent = <$chain as $crate::impls::Chain>::RuntimeEvent; + + $crate::impls::assert_expected_events!( + Self, + vec![ + RuntimeEvent::::ForeignAssets( + $crate::impls::pallet_assets::Event::Issued { asset_id, owner, amount } + ) => { + asset_id: *asset_id == id, + owner: *owner == beneficiary.clone().into(), + amount: *amount == amount_to_mint, + }, + ] + ); + }); + } + } + } + }; +} diff --git a/cumulus/parachains/integration-tests/emulated/networks/rococo-wococo-system/Cargo.toml b/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/Cargo.toml similarity index 57% rename from cumulus/parachains/integration-tests/emulated/networks/rococo-wococo-system/Cargo.toml rename to cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/Cargo.toml index 53a6f0840a5..34713f5b48e 100644 --- a/cumulus/parachains/integration-tests/emulated/networks/rococo-wococo-system/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/Cargo.toml @@ -1,18 +1,18 @@ [package] -name = "rococo-wococo-system-emulated-network" +name = "rococo-westend-system-emulated-network" version = "0.0.0" authors.workspace = true edition.workspace = true license = "Apache-2.0" -description = "Rococo<>Wococo emulated bridged network" +description = "Rococo<>Westend emulated bridged 
network" publish = false [dependencies] # Cumulus emulated-integration-tests-common = { path = "../../common", default-features = false } rococo-emulated-chain = { path = "../../chains/relays/rococo" } -wococo-emulated-chain = { path = "../../chains/relays/wococo" } +westend-emulated-chain = { path = "../../chains/relays/westend" } asset-hub-rococo-emulated-chain = { path = "../../chains/parachains/assets/asset-hub-rococo" } -asset-hub-wococo-emulated-chain = { path = "../../chains/parachains/assets/asset-hub-wococo" } +asset-hub-westend-emulated-chain = { path = "../../chains/parachains/assets/asset-hub-westend" } bridge-hub-rococo-emulated-chain = { path = "../../chains/parachains/bridges/bridge-hub-rococo" } -bridge-hub-wococo-emulated-chain = { path = "../../chains/parachains/bridges/bridge-hub-wococo" } +bridge-hub-westend-emulated-chain = { path = "../../chains/parachains/bridges/bridge-hub-westend" } diff --git a/cumulus/parachains/integration-tests/emulated/networks/rococo-wococo-system/src/lib.rs b/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/src/lib.rs similarity index 57% rename from cumulus/parachains/integration-tests/emulated/networks/rococo-wococo-system/src/lib.rs rename to cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/src/lib.rs index e20dcfa6b32..b03ff692b95 100644 --- a/cumulus/parachains/integration-tests/emulated/networks/rococo-wococo-system/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/src/lib.rs @@ -14,23 +14,23 @@ // limitations under the License. pub use asset_hub_rococo_emulated_chain; -pub use asset_hub_wococo_emulated_chain; +pub use asset_hub_westend_emulated_chain; pub use bridge_hub_rococo_emulated_chain; -pub use bridge_hub_wococo_emulated_chain; +pub use bridge_hub_westend_emulated_chain; pub use rococo_emulated_chain; -pub use wococo_emulated_chain; +pub use westend_emulated_chain; use asset_hub_rococo_emulated_chain::AssetHubRococo; -use asset_hub_wococo_emulated_chain::AssetHubWococo; +use asset_hub_westend_emulated_chain::AssetHubWestend; use bridge_hub_rococo_emulated_chain::BridgeHubRococo; -use bridge_hub_wococo_emulated_chain::BridgeHubWococo; +use bridge_hub_westend_emulated_chain::BridgeHubWestend; use rococo_emulated_chain::Rococo; -use wococo_emulated_chain::Wococo; +use westend_emulated_chain::Westend; // Cumulus use emulated_integration_tests_common::{ accounts::{ALICE, BOB}, - impls::{BridgeHubMessageHandler, BridgeMessagesInstance2}, + impls::{BridgeHubMessageHandler, BridgeMessagesInstance1, BridgeMessagesInstance3}, xcm_emulator::{ decl_test_bridges, decl_test_networks, decl_test_sender_receiver_accounts_parameter_types, Chain, @@ -44,51 +44,53 @@ decl_test_networks! { AssetHubRococo, BridgeHubRococo, ], - bridge = RococoWococoMockBridge + bridge = RococoWestendMockBridge }, - pub struct WococoMockNet { - relay_chain = Wococo, + pub struct WestendMockNet { + relay_chain = Westend, parachains = vec![ - AssetHubWococo, - BridgeHubWococo, + AssetHubWestend, + BridgeHubWestend, ], - bridge = WococoRococoMockBridge + bridge = WestendRococoMockBridge }, } decl_test_bridges! 
{ - pub struct RococoWococoMockBridge { + pub struct RococoWestendMockBridge { source = BridgeHubRococoPara, - target = BridgeHubWococoPara, - handler = RococoWococoMessageHandler + target = BridgeHubWestendPara, + handler = RococoWestendMessageHandler }, - pub struct WococoRococoMockBridge { - source = BridgeHubWococoPara, + pub struct WestendRococoMockBridge { + source = BridgeHubWestendPara, target = BridgeHubRococoPara, - handler = WococoRococoMessageHandler + handler = WestendRococoMessageHandler } } type BridgeHubRococoRuntime = ::Runtime; -type BridgeHubWococoRuntime = ::Runtime; +type BridgeHubWestendRuntime = ::Runtime; -pub type RococoWococoMessageHandler = BridgeHubMessageHandler< +pub type RococoWestendMessageHandler = BridgeHubMessageHandler< BridgeHubRococoRuntime, - BridgeHubWococoRuntime, - BridgeMessagesInstance2, + BridgeMessagesInstance3, + BridgeHubWestendRuntime, + BridgeMessagesInstance1, >; -pub type WococoRococoMessageHandler = BridgeHubMessageHandler< - BridgeHubWococoRuntime, +pub type WestendRococoMessageHandler = BridgeHubMessageHandler< + BridgeHubWestendRuntime, + BridgeMessagesInstance1, BridgeHubRococoRuntime, - BridgeMessagesInstance2, + BridgeMessagesInstance3, >; decl_test_sender_receiver_accounts_parameter_types! { RococoRelay { sender: ALICE, receiver: BOB }, AssetHubRococoPara { sender: ALICE, receiver: BOB }, BridgeHubRococoPara { sender: ALICE, receiver: BOB }, - WococoRelay { sender: ALICE, receiver: BOB }, - AssetHubWococoPara { sender: ALICE, receiver: BOB }, - BridgeHubWococoPara { sender: ALICE, receiver: BOB } + WestendRelay { sender: ALICE, receiver: BOB }, + AssetHubWestendPara { sender: ALICE, receiver: BOB }, + BridgeHubWestendPara { sender: ALICE, receiver: BOB } } diff --git a/cumulus/parachains/integration-tests/emulated/networks/wococo-system/Cargo.toml b/cumulus/parachains/integration-tests/emulated/networks/wococo-system/Cargo.toml deleted file mode 100644 index a596617e82b..00000000000 --- a/cumulus/parachains/integration-tests/emulated/networks/wococo-system/Cargo.toml +++ /dev/null @@ -1,16 +0,0 @@ -[package] -name = "wococo-system-emulated-network" -version = "0.0.0" -authors.workspace = true -edition.workspace = true -license = "Apache-2.0" -description = "Wococo System emulated network" -publish = false - -[dependencies] -# Cumulus -emulated-integration-tests-common = { path = "../../common", default-features = false } -wococo-emulated-chain = { path = "../../chains/relays/wococo" } -asset-hub-wococo-emulated-chain = { path = "../../chains/parachains/assets/asset-hub-wococo" } -bridge-hub-wococo-emulated-chain = { path = "../../chains/parachains/bridges/bridge-hub-wococo" } -penpal-emulated-chain = { path = "../../chains/parachains/testing/penpal" } diff --git a/cumulus/parachains/integration-tests/emulated/networks/wococo-system/src/lib.rs b/cumulus/parachains/integration-tests/emulated/networks/wococo-system/src/lib.rs deleted file mode 100644 index 5369afe7dff..00000000000 --- a/cumulus/parachains/integration-tests/emulated/networks/wococo-system/src/lib.rs +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub use asset_hub_wococo_emulated_chain; -pub use bridge_hub_wococo_emulated_chain; -pub use wococo_emulated_chain; - -use asset_hub_wococo_emulated_chain::AssetHubWococo; -use bridge_hub_wococo_emulated_chain::BridgeHubWococo; -use penpal_emulated_chain::{PenpalA, PenpalB}; -use wococo_emulated_chain::Wococo; - -// Cumulus -use emulated_integration_tests_common::{ - accounts::{ALICE, BOB}, - xcm_emulator::{decl_test_networks, decl_test_sender_receiver_accounts_parameter_types}, -}; - -decl_test_networks! { - pub struct WococoMockNet { - relay_chain = Wococo, - parachains = vec![ - AssetHubWococo, - BridgeHubWococo, - PenpalA, - PenpalB, - ], - bridge = () - }, -} - -decl_test_sender_receiver_accounts_parameter_types! { - WococoRelay { sender: ALICE, receiver: BOB }, - AssetHubWococoPara { sender: ALICE, receiver: BOB }, - BridgeHubWococoPara { sender: ALICE, receiver: BOB }, - PenpalAPara { sender: ALICE, receiver: BOB }, - PenpalBPara { sender: ALICE, receiver: BOB } -} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml index 035d9c10793..00e3af2e4ff 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml @@ -11,7 +11,9 @@ publish = false codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } # Substrate -frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false} +frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false } +pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false } +pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false } pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue" } # Polkadot @@ -30,4 +32,4 @@ cumulus-pallet-xcmp-queue = { path = "../../../../../../pallets/xcmp-queue", def cumulus-pallet-dmp-queue = { path = "../../../../../../pallets/dmp-queue", default-features = false} bridge-hub-rococo-runtime = { path = "../../../../../../parachains/runtimes/bridge-hubs/bridge-hub-rococo", default-features = false } emulated-integration-tests-common = { path = "../../../common", default-features = false} -rococo-wococo-system-emulated-network ={ path = "../../../networks/rococo-wococo-system" } +rococo-westend-system-emulated-network = { path = "../../../networks/rococo-westend-system" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs index 19e10d23bbb..53665437887 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs @@ -21,7 +21,7 @@ pub use xcm::{ 
prelude::{AccountId32 as AccountId32Junction, *}, v3::{ Error, - NetworkId::{Rococo as RococoId, Wococo as WococoId}, + NetworkId::{Rococo as RococoId, Westend as WestendId}, }, }; @@ -30,6 +30,8 @@ pub use bp_messages::LaneId; // Cumulus pub use emulated_integration_tests_common::{ + accounts::ALICE, + impls::Inspect, test_parachain_is_trusted_teleporter, xcm_emulator::{ assert_expected_events, bx, helpers::weight_within_threshold, Chain, Parachain as Para, @@ -39,17 +41,22 @@ pub use emulated_integration_tests_common::{ PROOF_SIZE_THRESHOLD, REF_TIME_THRESHOLD, XCM_V3, }; pub use parachains_common::{AccountId, Balance}; -pub use rococo_wococo_system_emulated_network::{ +pub use rococo_westend_system_emulated_network::{ + asset_hub_rococo_emulated_chain::{ + genesis::ED as ASSET_HUB_ROCOCO_ED, AssetHubRococoParaPallet as AssetHubRococoPallet, + }, + asset_hub_westend_emulated_chain::{ + genesis::ED as ASSET_HUB_WESTEND_ED, AssetHubWestendParaPallet as AssetHubWestendPallet, + }, bridge_hub_rococo_emulated_chain::{ genesis::ED as BRIDGE_HUB_ROCOCO_ED, BridgeHubRococoParaPallet as BridgeHubRococoPallet, }, - rococo_emulated_chain::{genesis::ED as ROCOCO_ED, RococoRelayPallet as RococoPallet}, + rococo_emulated_chain::RococoRelayPallet as RococoPallet, AssetHubRococoPara as AssetHubRococo, AssetHubRococoParaReceiver as AssetHubRococoReceiver, - AssetHubRococoParaSender as AssetHubRococoSender, AssetHubWococoPara as AssetHubWococo, - BridgeHubRococoPara as BridgeHubRococo, BridgeHubRococoParaReceiver as BridgeHubRococoReceiver, - BridgeHubRococoParaSender as BridgeHubRococoSender, BridgeHubWococoPara as BridgeHubWococo, - RococoRelay as Rococo, RococoRelayReceiver as RococoReceiver, - RococoRelaySender as RococoSender, + AssetHubRococoParaSender as AssetHubRococoSender, AssetHubWestendPara as AssetHubWestend, + AssetHubWestendParaReceiver as AssetHubWestendReceiver, BridgeHubRococoPara as BridgeHubRococo, + BridgeHubRococoParaSender as BridgeHubRococoSender, BridgeHubWestendPara as BridgeHubWestend, + RococoRelay as Rococo, }; pub const ASSET_ID: u32 = 1; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs new file mode 100644 index 00000000000..c55613f2826 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs @@ -0,0 +1,219 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::*; + +fn send_asset_from_asset_hub_rococo_to_asset_hub_westend(id: MultiLocation, amount: u128) { + let signed_origin = + ::RuntimeOrigin::signed(AssetHubRococoSender::get().into()); + let asset_hub_westend_para_id = AssetHubWestend::para_id().into(); + let destination = MultiLocation { + parents: 2, + interior: X2(GlobalConsensus(NetworkId::Westend), Parachain(asset_hub_westend_para_id)), + }; + let beneficiary_id = AssetHubWestendReceiver::get(); + let beneficiary: MultiLocation = + AccountId32Junction { network: None, id: beneficiary_id.into() }.into(); + let assets: MultiAssets = (id, amount).into(); + let fee_asset_item = 0; + + // fund the AHR's SA on BHR for paying bridge transport fees + let ahr_as_seen_by_bhr = BridgeHubRococo::sibling_location_of(AssetHubRococo::para_id()); + let sov_ahr_on_bhr = BridgeHubRococo::sovereign_account_id_of(ahr_as_seen_by_bhr); + BridgeHubRococo::fund_accounts(vec![(sov_ahr_on_bhr.into(), 10_000_000_000_000u128)]); + + AssetHubRococo::execute_with(|| { + assert_ok!( + ::PolkadotXcm::limited_reserve_transfer_assets( + signed_origin, + bx!(destination.into()), + bx!(beneficiary.into()), + bx!(assets.into()), + fee_asset_item, + WeightLimit::Unlimited, + ) + ); + }); + + BridgeHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + BridgeHubRococo, + vec![ + // pay for bridge fees + RuntimeEvent::Balances(pallet_balances::Event::Withdraw { .. }) => {}, + // message exported + RuntimeEvent::BridgeWestendMessages( + pallet_bridge_messages::Event::MessageAccepted { .. } + ) => {}, + // message processed successfully + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); + }); + BridgeHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + BridgeHubWestend, + vec![ + // message dispatched successfully + RuntimeEvent::XcmpQueue( + cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. } + ) => {}, + ] + ); + }); +} + +#[test] +fn send_rocs_from_asset_hub_rococo_to_asset_hub_westend() { + let roc_at_asset_hub_rococo: MultiLocation = Parent.into(); + let roc_at_asset_hub_westend = + MultiLocation { parents: 2, interior: X1(GlobalConsensus(NetworkId::Rococo)) }; + let owner: AccountId = AssetHubWestend::account_id_of(ALICE); + AssetHubWestend::force_create_foreign_asset( + roc_at_asset_hub_westend, + owner, + true, + ASSET_MIN_BALANCE, + vec![], + ); + let sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus( + NetworkId::Westend, + AssetHubWestend::para_id(), + ); + + let rocs_in_reserve_on_ahr_before = + ::account_data_of(sov_ahw_on_ahr.clone()).free; + let sender_rocs_before = + ::account_data_of(AssetHubRococoSender::get()).free; + let receiver_rocs_before = AssetHubWestend::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(roc_at_asset_hub_westend, &AssetHubWestendReceiver::get()) + }); + + let amount = ASSET_HUB_ROCOCO_ED * 1_000; + send_asset_from_asset_hub_rococo_to_asset_hub_westend(roc_at_asset_hub_rococo, amount); + AssetHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + AssetHubWestend, + vec![ + // issue ROCs on AHW + RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. 
}) => { + asset_id: *asset_id == roc_at_asset_hub_rococo, + owner: *owner == AssetHubWestendReceiver::get(), + }, + // message processed successfully + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); + }); + + let sender_rocs_after = + ::account_data_of(AssetHubRococoSender::get()).free; + let receiver_rocs_after = AssetHubWestend::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(roc_at_asset_hub_westend, &AssetHubWestendReceiver::get()) + }); + let rocs_in_reserve_on_ahr_after = + ::account_data_of(sov_ahw_on_ahr.clone()).free; + + // Sender's balance is reduced + assert!(sender_rocs_before > sender_rocs_after); + // Receiver's balance is increased + assert!(receiver_rocs_after > receiver_rocs_before); + // Reserve balance is reduced by sent amount + assert_eq!(rocs_in_reserve_on_ahr_after, rocs_in_reserve_on_ahr_before + amount); +} + +#[test] +fn send_wnds_from_asset_hub_rococo_to_asset_hub_westend() { + let prefund_amount = 10_000_000_000_000u128; + let wnd_at_asset_hub_rococo = + MultiLocation { parents: 2, interior: X1(GlobalConsensus(NetworkId::Westend)) }; + let owner: AccountId = AssetHubWestend::account_id_of(ALICE); + AssetHubRococo::force_create_foreign_asset( + wnd_at_asset_hub_rococo, + owner, + true, + ASSET_MIN_BALANCE, + vec![(AssetHubRococoSender::get(), prefund_amount)], + ); + + // fund the AHR's SA on AHW with the WND tokens held in reserve + let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus( + NetworkId::Rococo, + AssetHubRococo::para_id(), + ); + AssetHubWestend::fund_accounts(vec![(sov_ahr_on_ahw.clone(), prefund_amount)]); + + let wnds_in_reserve_on_ahw_before = + ::account_data_of(sov_ahr_on_ahw.clone()).free; + assert_eq!(wnds_in_reserve_on_ahw_before, prefund_amount); + let sender_wnds_before = AssetHubRococo::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(wnd_at_asset_hub_rococo, &AssetHubRococoSender::get()) + }); + assert_eq!(sender_wnds_before, prefund_amount); + let receiver_wnds_before = + ::account_data_of(AssetHubWestendReceiver::get()).free; + + let amount_to_send = ASSET_HUB_WESTEND_ED * 1_000; + send_asset_from_asset_hub_rococo_to_asset_hub_westend(wnd_at_asset_hub_rococo, amount_to_send); + AssetHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + AssetHubWestend, + vec![ + // WND is withdrawn from AHR's SA on AHW + RuntimeEvent::Balances( + pallet_balances::Event::Withdraw { who, amount } + ) => { + who: *who == sov_ahr_on_ahw, + amount: *amount == amount_to_send, + }, + // WNDs deposited to beneficiary + RuntimeEvent::Balances(pallet_balances::Event::Deposit { who, .. }) => { + who: *who == AssetHubWestendReceiver::get(), + }, + // message processed successfully + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. 
} + ) => {}, + ] + ); + }); + + let sender_wnds_after = AssetHubRococo::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(wnd_at_asset_hub_rococo, &AssetHubRococoSender::get()) + }); + let receiver_wnds_after = + ::account_data_of(AssetHubWestendReceiver::get()).free; + let wnds_in_reserve_on_ahw_after = + ::account_data_of(sov_ahr_on_ahw).free; + + // Sender's balance is reduced + assert!(sender_wnds_before > sender_wnds_after); + // Receiver's balance is increased + assert!(receiver_wnds_after > receiver_wnds_before); + // Reserve balance is reduced by sent amount + assert_eq!(wnds_in_reserve_on_ahw_after, wnds_in_reserve_on_ahw_before - amount_to_send); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs index 1eef05c6b92..4e2ef1434fd 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs @@ -13,5 +13,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -mod example; +mod asset_transfers; +mod send_xcm; mod teleport; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/example.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs similarity index 77% rename from cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/example.rs rename to cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs index 35cfa394174..4e61f7ce0dd 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/example.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs @@ -16,7 +16,7 @@ use crate::*; #[test] -fn example() { +fn send_xcm_from_rococo_relay_to_westend_asset_hub() { // Init tests variables // XcmPallet send arguments let sudo_origin = ::RuntimeOrigin::root(); @@ -29,13 +29,13 @@ fn example() { let xcm = VersionedXcm::from(Xcm(vec![ UnpaidExecution { weight_limit, check_origin }, ExportMessage { - network: WococoId, - destination: X1(Parachain(AssetHubWococo::para_id().into())), + network: WestendId, + destination: X1(Parachain(AssetHubWestend::para_id().into())), xcm: remote_xcm, }, ])); - //Rococo Global Consensus + // Rococo Global Consensus // Send XCM message from Relay Chain to Bridge Hub source Parachain Rococo::execute_with(|| { assert_ok!(::XcmPallet::send( @@ -64,32 +64,32 @@ fn example() { success: true, .. }) => {}, - RuntimeEvent::BridgeWococoMessages(pallet_bridge_messages::Event::MessageAccepted { - lane_id: LaneId([0, 0, 0, 1]), + RuntimeEvent::BridgeWestendMessages(pallet_bridge_messages::Event::MessageAccepted { + lane_id: LaneId([0, 0, 0, 2]), nonce: 1, }) => {}, ] ); }); - // Wococo GLobal Consensus + // Westend Global Consensus // Receive XCM message in Bridge Hub target Parachain - BridgeHubWococo::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; + BridgeHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; assert_expected_events!( - BridgeHubWococo, + BridgeHubWestend, vec![ RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. 
}) => {}, ] ); }); - // Receive embeded XCM message within `ExportMessage` in Parachain destination - AssetHubWococo::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; + // Receive embedded XCM message within `ExportMessage` in Parachain destination + AssetHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; assert_expected_events!( - AssetHubWococo, + AssetHubWestend, vec![ RuntimeEvent::MessageQueue(pallet_message_queue::Event::ProcessingFailed { .. diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml index 62b969b682f..e5b1fce5f2b 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml @@ -11,7 +11,9 @@ publish = false codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } # Substrate -frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false} +frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false } +pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false } +pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false } pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue" } # Polkadot @@ -30,4 +32,4 @@ cumulus-pallet-xcmp-queue = { path = "../../../../../../pallets/xcmp-queue", def cumulus-pallet-dmp-queue = { path = "../../../../../../pallets/dmp-queue", default-features = false} bridge-hub-westend-runtime = { path = "../../../../../../parachains/runtimes/bridge-hubs/bridge-hub-westend", default-features = false } emulated-integration-tests-common = { path = "../../../common", default-features = false} -westend-system-emulated-network ={ path = "../../../networks/westend-system" } +rococo-westend-system-emulated-network = { path = "../../../networks/rococo-westend-system" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs index f406a73d18d..04746aa8670 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs @@ -19,7 +19,10 @@ pub use frame_support::assert_ok; // Polkadot pub use xcm::{ prelude::{AccountId32 as AccountId32Junction, *}, - v3::{Error, NetworkId::Rococo as RococoId}, + v3::{ + Error, + NetworkId::{Rococo as RococoId, Westend as WestendId}, + }, }; // Bridges @@ -27,6 +30,8 @@ pub use bp_messages::LaneId; // Cumulus pub use emulated_integration_tests_common::{ + accounts::ALICE, + impls::Inspect, test_parachain_is_trusted_teleporter, xcm_emulator::{ assert_expected_events, bx, helpers::weight_within_threshold, Chain, Parachain as Para, @@ -36,16 +41,22 @@ pub use emulated_integration_tests_common::{ PROOF_SIZE_THRESHOLD, REF_TIME_THRESHOLD, XCM_V3, }; pub use parachains_common::{AccountId, Balance}; -pub use westend_system_emulated_network::{ +pub use rococo_westend_system_emulated_network::{ + asset_hub_rococo_emulated_chain::{ + genesis::ED as ASSET_HUB_ROCOCO_ED, AssetHubRococoParaPallet as AssetHubRococoPallet, + }, + asset_hub_westend_emulated_chain::{ + 
genesis::ED as ASSET_HUB_WESTEND_ED, AssetHubWestendParaPallet as AssetHubWestendPallet, + }, bridge_hub_westend_emulated_chain::{ - genesis::ED as BRIDGE_HUB_ROCOCO_ED, BridgeHubWestendParaPallet as BridgeHubWestendPallet, + genesis::ED as BRIDGE_HUB_WESTEND_ED, BridgeHubWestendParaPallet as BridgeHubWestendPallet, }, - westend_emulated_chain::{genesis::ED as ROCOCO_ED, WestendRelayPallet as WestendPallet}, + westend_emulated_chain::WestendRelayPallet as WestendPallet, + AssetHubRococoPara as AssetHubRococo, AssetHubRococoParaReceiver as AssetHubRococoReceiver, AssetHubWestendPara as AssetHubWestend, AssetHubWestendParaReceiver as AssetHubWestendReceiver, - AssetHubWestendParaSender as AssetHubWestendSender, BridgeHubWestendPara as BridgeHubWestend, - BridgeHubWestendParaReceiver as BridgeHubWestendReceiver, - BridgeHubWestendParaSender as BridgeHubWestendSender, WestendRelay as Westend, - WestendRelayReceiver as WestendReceiver, WestendRelaySender as WestendSender, + AssetHubWestendParaSender as AssetHubWestendSender, BridgeHubRococoPara as BridgeHubRococo, + BridgeHubWestendPara as BridgeHubWestend, BridgeHubWestendParaSender as BridgeHubWestendSender, + WestendRelay as Westend, }; pub const ASSET_ID: u32 = 1; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs new file mode 100644 index 00000000000..f90514f80c3 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs @@ -0,0 +1,218 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+use crate::*; + +fn send_asset_from_asset_hub_westend_to_asset_hub_rococo(id: MultiLocation, amount: u128) { + let signed_origin = + ::RuntimeOrigin::signed(AssetHubWestendSender::get().into()); + let asset_hub_rococo_para_id = AssetHubRococo::para_id().into(); + let destination = MultiLocation { + parents: 2, + interior: X2(GlobalConsensus(NetworkId::Rococo), Parachain(asset_hub_rococo_para_id)), + }; + let beneficiary_id = AssetHubRococoReceiver::get(); + let beneficiary: MultiLocation = + AccountId32Junction { network: None, id: beneficiary_id.into() }.into(); + let assets: MultiAssets = (id, amount).into(); + let fee_asset_item = 0; + + // fund the AHW's SA on BHW for paying bridge transport fees + let ahw_as_seen_by_bhw = BridgeHubWestend::sibling_location_of(AssetHubWestend::para_id()); + let sov_ahw_on_bhw = BridgeHubWestend::sovereign_account_id_of(ahw_as_seen_by_bhw); + BridgeHubWestend::fund_accounts(vec![(sov_ahw_on_bhw.into(), 10_000_000_000_000u128)]); + + AssetHubWestend::execute_with(|| { + assert_ok!( + ::PolkadotXcm::limited_reserve_transfer_assets( + signed_origin, + bx!(destination.into()), + bx!(beneficiary.into()), + bx!(assets.into()), + fee_asset_item, + WeightLimit::Unlimited, + ) + ); + }); + + BridgeHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + BridgeHubWestend, + vec![ + // pay for bridge fees + RuntimeEvent::Balances(pallet_balances::Event::Withdraw { .. }) => {}, + // message exported + RuntimeEvent::BridgeRococoMessages( + pallet_bridge_messages::Event::MessageAccepted { .. } + ) => {}, + // message processed successfully + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); + }); + BridgeHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + BridgeHubRococo, + vec![ + // message dispatched successfully + RuntimeEvent::XcmpQueue( + cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. } + ) => {}, + ] + ); + }); +} + +#[test] +fn send_wnds_from_asset_hub_westend_to_asset_hub_rococo() { + let wnd_at_asset_hub_westend: MultiLocation = Parent.into(); + let wnd_at_asset_hub_rococo = + MultiLocation { parents: 2, interior: X1(GlobalConsensus(NetworkId::Westend)) }; + let owner: AccountId = AssetHubRococo::account_id_of(ALICE); + AssetHubRococo::force_create_foreign_asset( + wnd_at_asset_hub_rococo, + owner, + true, + ASSET_MIN_BALANCE, + vec![], + ); + let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus( + NetworkId::Rococo, + AssetHubRococo::para_id(), + ); + + let wnds_in_reserve_on_ahw_before = + ::account_data_of(sov_ahr_on_ahw.clone()).free; + let sender_wnds_before = + ::account_data_of(AssetHubWestendSender::get()).free; + let receiver_wnds_before = AssetHubRococo::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(wnd_at_asset_hub_rococo, &AssetHubRococoReceiver::get()) + }); + + let amount = ASSET_HUB_WESTEND_ED * 1_000; + send_asset_from_asset_hub_westend_to_asset_hub_rococo(wnd_at_asset_hub_westend, amount); + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + AssetHubRococo, + vec![ + // issue WNDs on AHR + RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. 
}) => { + asset_id: *asset_id == wnd_at_asset_hub_rococo, + owner: *owner == AssetHubRococoReceiver::get(), + }, + // message processed successfully + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); + }); + + let sender_wnds_after = + ::account_data_of(AssetHubWestendSender::get()).free; + let receiver_wnds_after = AssetHubRococo::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(wnd_at_asset_hub_rococo, &AssetHubRococoReceiver::get()) + }); + let wnds_in_reserve_on_ahw_after = + ::account_data_of(sov_ahr_on_ahw).free; + + // Sender's balance is reduced + assert!(sender_wnds_before > sender_wnds_after); + // Receiver's balance is increased + assert!(receiver_wnds_after > receiver_wnds_before); + // Reserve balance is increased by sent amount + assert_eq!(wnds_in_reserve_on_ahw_after, wnds_in_reserve_on_ahw_before + amount); +} + +#[test] +fn send_rocs_from_asset_hub_westend_to_asset_hub_rococo() { + let prefund_amount = 10_000_000_000_000u128; + let roc_at_asset_hub_westend = + MultiLocation { parents: 2, interior: X1(GlobalConsensus(NetworkId::Rococo)) }; + let owner: AccountId = AssetHubWestend::account_id_of(ALICE); + AssetHubWestend::force_create_foreign_asset( + roc_at_asset_hub_westend, + owner, + true, + ASSET_MIN_BALANCE, + vec![(AssetHubWestendSender::get(), prefund_amount)], + ); + + // fund the AHW's SA on AHR with the ROC tokens held in reserve + let sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus( + NetworkId::Westend, + AssetHubWestend::para_id(), + ); + AssetHubRococo::fund_accounts(vec![(sov_ahw_on_ahr.clone(), prefund_amount)]); + + let rocs_in_reserve_on_ahr_before = + ::account_data_of(sov_ahw_on_ahr.clone()).free; + assert_eq!(rocs_in_reserve_on_ahr_before, prefund_amount); + let sender_rocs_before = AssetHubWestend::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(roc_at_asset_hub_westend, &AssetHubWestendSender::get()) + }); + assert_eq!(sender_rocs_before, prefund_amount); + let receiver_rocs_before = + ::account_data_of(AssetHubRococoReceiver::get()).free; + + let amount_to_send = ASSET_HUB_ROCOCO_ED * 1_000; + send_asset_from_asset_hub_westend_to_asset_hub_rococo(roc_at_asset_hub_westend, amount_to_send); + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + AssetHubRococo, + vec![ + // ROC is withdrawn from AHW's SA on AHR + RuntimeEvent::Balances( + pallet_balances::Event::Withdraw { who, amount } + ) => { + who: *who == sov_ahw_on_ahr, + amount: *amount == amount_to_send, + }, + // ROCs deposited to beneficiary + RuntimeEvent::Balances(pallet_balances::Event::Deposit { who, .. }) => { + who: *who == AssetHubRococoReceiver::get(), + }, + // message processed successfully + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. 
} + ) => {}, + ] + ); + }); + + let sender_rocs_after = AssetHubWestend::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(roc_at_asset_hub_westend, &AssetHubWestendSender::get()) + }); + let receiver_rocs_after = + ::account_data_of(AssetHubRococoReceiver::get()).free; + let rocs_in_reserve_on_ahr_after = + ::account_data_of(sov_ahw_on_ahr.clone()).free; + + // Sender's balance is reduced + assert!(sender_rocs_before > sender_rocs_after); + // Receiver's balance is increased + assert!(receiver_rocs_after > receiver_rocs_before); + // Reserve balance is reduced by sent amount + assert_eq!(rocs_in_reserve_on_ahr_after, rocs_in_reserve_on_ahr_before - amount_to_send); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs index 1eef05c6b92..4e2ef1434fd 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs @@ -13,5 +13,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -mod example; +mod asset_transfers; +mod send_xcm; mod teleport; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/example.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs similarity index 71% rename from cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/example.rs rename to cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs index 1fdd9441e48..4b21d758cd9 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/example.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs @@ -16,7 +16,7 @@ use crate::*; #[test] -fn example() { +fn send_xcm_from_westend_relay_to_rococo_asset_hub() { // Init tests variables // XcmPallet send arguments let sudo_origin = ::RuntimeOrigin::root(); @@ -30,7 +30,7 @@ fn example() { UnpaidExecution { weight_limit, check_origin }, ExportMessage { network: RococoId, - destination: X1(Parachain(AssetHubWestend::para_id().into())), + destination: X1(Parachain(AssetHubRococo::para_id().into())), xcm: remote_xcm, }, ])); @@ -71,4 +71,30 @@ fn example() { ] ); }); + + // Rococo Global Consensus + // Receive XCM message in Bridge Hub target Parachain + BridgeHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + BridgeHubRococo, + vec![ + RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) => {}, + ] + ); + }); + // Receive embedded XCM message within `ExportMessage` in Parachain destination + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + AssetHubRococo, + vec![ + RuntimeEvent::MessageQueue(pallet_message_queue::Event::ProcessingFailed { + .. 
+ }) => {}, + ] + ); + }); } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/teleport.rs index 32639b8614b..8dff6c29295 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/teleport.rs @@ -18,7 +18,7 @@ use bridge_hub_westend_runtime::xcm_config::XcmConfig; #[test] fn teleport_to_other_system_parachains_works() { - let amount = BRIDGE_HUB_ROCOCO_ED * 100; + let amount = BRIDGE_HUB_WESTEND_ED * 100; let native_asset: MultiAssets = (Parent, amount).into(); test_parachain_is_trusted_teleporter!( -- GitLab From 7cfc233cdc6e6c709594ff26640a350c2e6fb6a8 Mon Sep 17 00:00:00 2001 From: Marcin S Date: Tue, 14 Nov 2023 15:03:19 +0100 Subject: [PATCH 21/74] PVF: fix detection of unshare-and-change-root security capability (#2304) --- Cargo.lock | 1 + .../node/core/candidate-validation/src/lib.rs | 2 +- polkadot/node/core/pvf/Cargo.toml | 1 + .../benches/host_prepare_rococo_runtime.rs | 2 +- .../node/core/pvf/common/src/worker/mod.rs | 6 +++--- polkadot/node/core/pvf/src/host.rs | 8 ++++++-- polkadot/node/core/pvf/src/security.rs | 20 +++++++++++++++++-- polkadot/node/core/pvf/tests/it/main.rs | 2 +- 8 files changed, 32 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 32d9099b386..a9b6c68b50f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12453,6 +12453,7 @@ dependencies = [ "polkadot-node-core-pvf-prepare-worker", "polkadot-node-metrics", "polkadot-node-primitives", + "polkadot-node-subsystem", "polkadot-parachain-primitives", "polkadot-primitives", "procfs", diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index 89ea0272884..4232e5f1cdd 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -150,7 +150,7 @@ async fn run( ), pvf_metrics, ) - .await; + .await?; ctx.spawn_blocking("pvf-validation-host", task.boxed())?; loop { diff --git a/polkadot/node/core/pvf/Cargo.toml b/polkadot/node/core/pvf/Cargo.toml index 430f7cd5e8e..3e72ca9e532 100644 --- a/polkadot/node/core/pvf/Cargo.toml +++ b/polkadot/node/core/pvf/Cargo.toml @@ -27,6 +27,7 @@ polkadot-core-primitives = { path = "../../../core-primitives" } polkadot-node-core-pvf-common = { path = "common" } polkadot-node-metrics = { path = "../../metrics" } polkadot-node-primitives = { path = "../../primitives" } +polkadot-node-subsystem = { path = "../../subsystem" } polkadot-primitives = { path = "../../../primitives" } sp-core = { path = "../../../../substrate/primitives/core" } diff --git a/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs b/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs index acd80526262..d0cefae6cdb 100644 --- a/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs +++ b/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs @@ -47,7 +47,7 @@ impl TestHost { execute_worker_path, ); f(&mut config); - let (host, task) = start(config, Metrics::default()).await; + let (host, task) = start(config, Metrics::default()).await.unwrap(); let _ = handle.spawn(task); Self { host: Mutex::new(host) } } diff --git a/polkadot/node/core/pvf/common/src/worker/mod.rs b/polkadot/node/core/pvf/common/src/worker/mod.rs index 
274a2fc8039..f6a67b98321 100644 --- a/polkadot/node/core/pvf/common/src/worker/mod.rs +++ b/polkadot/node/core/pvf/common/src/worker/mod.rs @@ -92,13 +92,13 @@ macro_rules! decl_worker_main { std::process::exit(status) }, "--check-can-unshare-user-namespace-and-change-root" => { + #[cfg(target_os = "linux")] + let cache_path_tempdir = std::path::Path::new(&args[2]); #[cfg(target_os = "linux")] let status = if let Err(err) = security::unshare_user_namespace_and_change_root( $crate::worker::WorkerKind::CheckPivotRoot, worker_pid, - // We're not accessing any files, so we can try to pivot_root in the temp - // dir without conflicts with other processes. - &std::env::temp_dir(), + &cache_path_tempdir, ) { // Write the error to stderr, log it on the host-side. eprintln!("{}", err); diff --git a/polkadot/node/core/pvf/src/host.rs b/polkadot/node/core/pvf/src/host.rs index 7b383e8034a..5919b9ba32c 100644 --- a/polkadot/node/core/pvf/src/host.rs +++ b/polkadot/node/core/pvf/src/host.rs @@ -35,6 +35,7 @@ use polkadot_node_core_pvf_common::{ error::{PrepareError, PrepareResult}, pvf::PvfPrepData, }; +use polkadot_node_subsystem::SubsystemResult; use polkadot_parachain_primitives::primitives::ValidationResult; use std::{ collections::HashMap, @@ -203,7 +204,10 @@ impl Config { /// The future should not return normally but if it does then that indicates an unrecoverable error. /// In that case all pending requests will be canceled, dropping the result senders and new ones /// will be rejected. -pub async fn start(config: Config, metrics: Metrics) -> (ValidationHost, impl Future) { +pub async fn start( + config: Config, + metrics: Metrics, +) -> SubsystemResult<(ValidationHost, impl Future)> { gum::debug!(target: LOG_TARGET, ?config, "starting PVF validation host"); // Run checks for supported security features once per host startup. Warn here if not enabled. @@ -273,7 +277,7 @@ pub async fn start(config: Config, metrics: Metrics) -> (ValidationHost, impl Fu }; }; - (validation_host, task) + Ok((validation_host, task)) } /// A mapping from an artifact ID which is in preparation state to the list of pending execution diff --git a/polkadot/node/core/pvf/src/security.rs b/polkadot/node/core/pvf/src/security.rs index 295dd7df94d..0c0c5f40166 100644 --- a/polkadot/node/core/pvf/src/security.rs +++ b/polkadot/node/core/pvf/src/security.rs @@ -27,14 +27,19 @@ const SECURE_MODE_ANNOUNCEMENT: &'static str = \nMore information: https://wiki.polkadot.network/docs/maintain-guides-secure-validator#secure-validator-mode"; /// Run checks for supported security features. +/// +/// # Return +/// +/// Returns the set of security features that we were able to enable. If an error occurs while +/// enabling a security feature we set the corresponding status to `false`. pub async fn check_security_status(config: &Config) -> SecurityStatus { - let Config { prepare_worker_program_path, .. } = config; + let Config { prepare_worker_program_path, cache_path, .. } = config; // TODO: add check that syslog is available and that seccomp violations are logged? 
let (landlock, seccomp, change_root) = join!( check_landlock(prepare_worker_program_path), check_seccomp(prepare_worker_program_path), - check_can_unshare_user_namespace_and_change_root(prepare_worker_program_path) + check_can_unshare_user_namespace_and_change_root(prepare_worker_program_path, cache_path) ); let security_status = SecurityStatus { @@ -149,11 +154,22 @@ fn print_secure_mode_message(errs: Vec) -> bool { async fn check_can_unshare_user_namespace_and_change_root( #[cfg_attr(not(target_os = "linux"), allow(unused_variables))] prepare_worker_program_path: &Path, + #[cfg_attr(not(target_os = "linux"), allow(unused_variables))] cache_path: &Path, ) -> SecureModeResult { cfg_if::cfg_if! { if #[cfg(target_os = "linux")] { + let cache_dir_tempdir = + crate::worker_intf::tmppath_in("check-can-unshare", cache_path) + .await + .map_err( + |err| + SecureModeError::CannotUnshareUserNamespaceAndChangeRoot( + format!("could not create a temporary directory in {:?}: {}", cache_path, err) + ) + )?; match tokio::process::Command::new(prepare_worker_program_path) .arg("--check-can-unshare-user-namespace-and-change-root") + .arg(cache_dir_tempdir) .output() .await { diff --git a/polkadot/node/core/pvf/tests/it/main.rs b/polkadot/node/core/pvf/tests/it/main.rs index f4fd7f802f5..801b60884fa 100644 --- a/polkadot/node/core/pvf/tests/it/main.rs +++ b/polkadot/node/core/pvf/tests/it/main.rs @@ -61,7 +61,7 @@ impl TestHost { execute_worker_path, ); f(&mut config); - let (host, task) = start(config, Metrics::default()).await; + let (host, task) = start(config, Metrics::default()).await.unwrap(); let _ = tokio::task::spawn(task); Self { cache_dir, host: Mutex::new(host) } } -- GitLab From b70d418f89a722b0543ba2fb1c014d5ba6e65186 Mon Sep 17 00:00:00 2001 From: Lulu Date: Tue, 14 Nov 2023 14:23:08 +0000 Subject: [PATCH 22/74] Add environment to claim workflow (#2318) Turns out to access environment secrets the workflow must explicitly opt in to the environment. --- .github/workflows/claim-crates.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/claim-crates.yml b/.github/workflows/claim-crates.yml index a1d28d42828..345d24c7566 100644 --- a/.github/workflows/claim-crates.yml +++ b/.github/workflows/claim-crates.yml @@ -8,6 +8,7 @@ on: jobs: claim-crates: runs-on: ubuntu-latest + environment: master steps: - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 -- GitLab From cfe5e62626ac61e452a77ea0545aaeaa2cc1176e Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Tue, 14 Nov 2023 16:54:36 +0200 Subject: [PATCH 23/74] chainHead: Support multiple hashes for `chainHead_unpin` method (#2295) This PR adds support for multiple hashes being passed to the `chainHeda_unpin` parameters. The `hash` parameter is renamed to `hash_or_hashes` per https://github.com/paritytech/json-rpc-interface-spec/pull/111. While at it, a new integration test is added to check the unpinning of multiple hashes. The API is checked against a hash or a vector of hashes. 
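
Example usage (a sketch adapted from the integration tests added in this PR; `api`, `sub_id`, and the block hashes come from the test setup and are placeholders here):

```rust
// Unpin a single block: the existing single-hash form keeps working.
let _res: () = api
    .call("chainHead_unstable_unpin", rpc_params![&sub_id, &block_hash])
    .await
    .unwrap();

// Unpin several blocks at once: `hash_or_hashes` also accepts a list of hashes.
let _res: () = api
    .call("chainHead_unstable_unpin", rpc_params![&sub_id, vec![&block_2_hash, &block_3_hash]])
    .await
    .unwrap();
```

If any of the provided hashes is unknown to the subscription, the whole call fails and no blocks are unpinned.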
cc @paritytech/subxt-team --------- Signed-off-by: Alexandru Vasile --- Cargo.lock | 1 + substrate/client/rpc-spec-v2/Cargo.toml | 1 + .../client/rpc-spec-v2/src/chain_head/api.rs | 11 +- .../rpc-spec-v2/src/chain_head/chain_head.rs | 12 +- .../src/chain_head/subscription/inner.rs | 38 ++-- .../src/chain_head/subscription/mod.rs | 17 +- .../rpc-spec-v2/src/chain_head/tests.rs | 173 +++++++++++++++++- 7 files changed, 224 insertions(+), 29 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a9b6c68b50f..c57a5ce1393 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -15970,6 +15970,7 @@ dependencies = [ "sp-consensus", "sp-core", "sp-maybe-compressed-blob", + "sp-rpc", "sp-runtime", "sp-version", "substrate-test-runtime", diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml index cfe7f8a117d..ca61286ddfa 100644 --- a/substrate/client/rpc-spec-v2/Cargo.toml +++ b/substrate/client/rpc-spec-v2/Cargo.toml @@ -21,6 +21,7 @@ sc-transaction-pool-api = { path = "../transaction-pool/api" } sp-core = { path = "../../primitives/core" } sp-runtime = { path = "../../primitives/runtime" } sp-api = { path = "../../primitives/api" } +sp-rpc = { path = "../../primitives/rpc" } sp-blockchain = { path = "../../primitives/blockchain" } sp-version = { path = "../../primitives/version" } sc-client-api = { path = "../api" } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/api.rs b/substrate/client/rpc-spec-v2/src/chain_head/api.rs index d93c4018b60..427b5499bc1 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/api.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/api.rs @@ -21,6 +21,7 @@ //! API trait of the chain head. use crate::chain_head::event::{FollowEvent, MethodResponse, StorageQuery}; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; +use sp_rpc::list::ListOrValue; #[rpc(client, server)] pub trait ChainHeadApi { @@ -109,16 +110,22 @@ pub trait ChainHeadApi { call_parameters: String, ) -> RpcResult; - /// Unpin a block reported by the `follow` method. + /// Unpin a block or multiple blocks reported by the `follow` method. /// /// Ongoing operations that require the provided block /// will continue normally. /// + /// When this method returns an error, it is guaranteed that no blocks have been unpinned. + /// /// # Unstable /// /// This method is unstable and subject to change in the future. #[method(name = "chainHead_unstable_unpin", blocking)] - fn chain_head_unstable_unpin(&self, follow_subscription: String, hash: Hash) -> RpcResult<()>; + fn chain_head_unstable_unpin( + &self, + follow_subscription: String, + hash_or_hashes: ListOrValue, + ) -> RpcResult<()>; /// Resumes a storage fetch started with `chainHead_storage` after it has generated an /// `operationWaitingForContinue` event. 
diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs index a8c1c4f7e08..2d01c302037 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs @@ -48,6 +48,7 @@ use sc_client_api::{ use sp_api::CallApiAt; use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; use sp_core::{traits::CallContext, Bytes}; +use sp_rpc::list::ListOrValue; use sp_runtime::traits::Block as BlockT; use std::{marker::PhantomData, sync::Arc, time::Duration}; @@ -432,9 +433,16 @@ where fn chain_head_unstable_unpin( &self, follow_subscription: String, - hash: Block::Hash, + hash_or_hashes: ListOrValue, ) -> RpcResult<()> { - match self.subscriptions.unpin_block(&follow_subscription, hash) { + let result = match hash_or_hashes { + ListOrValue::Value(hash) => + self.subscriptions.unpin_blocks(&follow_subscription, [hash]), + ListOrValue::List(hashes) => + self.subscriptions.unpin_blocks(&follow_subscription, hashes), + }; + + match result { Ok(()) => Ok(()), Err(SubscriptionManagementError::SubscriptionAbsent) => { // Invalid invalid subscription ID. diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs index 8a75029a994..abd42ad9678 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs @@ -750,22 +750,36 @@ impl> SubscriptionsInner { } } - pub fn unpin_block( + pub fn unpin_blocks( &mut self, sub_id: &str, - hash: Block::Hash, + hashes: impl IntoIterator + Clone, ) -> Result<(), SubscriptionManagementError> { let Some(sub) = self.subs.get_mut(sub_id) else { return Err(SubscriptionManagementError::SubscriptionAbsent) }; - // Check that unpin was not called before and the block was pinned - // for this subscription. - if !sub.unregister_block(hash) { - return Err(SubscriptionManagementError::BlockHashAbsent) + // Ensure that all blocks are part of the subscription before removing individual + // blocks. + for hash in hashes.clone() { + if !sub.contains_block(hash) { + return Err(SubscriptionManagementError::BlockHashAbsent); + } + } + + // Note: this needs to be separate from the global mappings to avoid barrow checker + // thinking we borrow `&mut self` twice: once from `self.subs.get_mut` and once from + // `self.global_unregister_block`. Although the borrowing is correct, since different + // fields of the structure are borrowed, one at a time. + for hash in hashes.clone() { + sub.unregister_block(hash); + } + + // Block have been removed from the subscription. Remove them from the global tracking. + for hash in hashes { + self.global_unregister_block(hash); } - self.global_unregister_block(hash); Ok(()) } @@ -1029,11 +1043,11 @@ mod tests { assert_eq!(block.has_runtime(), true); let invalid_id = "abc-invalid".to_string(); - let err = subs.unpin_block(&invalid_id, hash).unwrap_err(); + let err = subs.unpin_blocks(&invalid_id, vec![hash]).unwrap_err(); assert_eq!(err, SubscriptionManagementError::SubscriptionAbsent); // Unpin the block. - subs.unpin_block(&id, hash).unwrap(); + subs.unpin_blocks(&id, vec![hash]).unwrap(); let err = subs.lock_block(&id, hash, 1).unwrap_err(); assert_eq!(err, SubscriptionManagementError::BlockHashAbsent); } @@ -1077,13 +1091,13 @@ mod tests { // Ensure the block propagated to the subscription. 
subs.subs.get(&id_second).unwrap().blocks.get(&hash).unwrap(); - subs.unpin_block(&id, hash).unwrap(); + subs.unpin_blocks(&id, vec![hash]).unwrap(); assert_eq!(*subs.global_blocks.get(&hash).unwrap(), 1); // Cannot unpin a block twice for the same subscription. - let err = subs.unpin_block(&id, hash).unwrap_err(); + let err = subs.unpin_blocks(&id, vec![hash]).unwrap_err(); assert_eq!(err, SubscriptionManagementError::BlockHashAbsent); - subs.unpin_block(&id_second, hash).unwrap(); + subs.unpin_blocks(&id_second, vec![hash]).unwrap(); // Block unregistered from the memory. assert!(subs.global_blocks.get(&hash).is_none()); } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs index b25b1a4913b..c830e662da2 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs @@ -94,22 +94,23 @@ impl> SubscriptionManagement { inner.pin_block(sub_id, hash) } - /// Unpin the block from the subscription. + /// Unpin the blocks from the subscription. /// - /// The last subscription that unpins the block is also unpinning the block - /// from the backend. + /// Blocks are reference counted and when the last subscription unpins a given block, the block + /// is also unpinned from the backend. /// /// This method is called only once per subscription. /// - /// Returns an error if the block is not pinned for the subscription or - /// the subscription ID is invalid. - pub fn unpin_block( + /// Returns an error if the subscription ID is invalid, or any of the blocks are not pinned + /// for the subscriptions. When an error is returned, it is guaranteed that no blocks have + /// been unpinned. + pub fn unpin_blocks( &self, sub_id: &str, - hash: Block::Hash, + hashes: impl IntoIterator + Clone, ) -> Result<(), SubscriptionManagementError> { let mut inner = self.inner.write(); - inner.unpin_block(sub_id, hash) + inner.unpin_blocks(sub_id, hashes) } /// Ensure the block remains pinned until the return object is dropped. diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index c3f5564ebc4..15b258c4756 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -1591,14 +1591,17 @@ async fn follow_with_unpin() { // Unpin an invalid subscription ID must return Ok(()). let invalid_hash = hex_string(&INVALID_HASH); let _res: () = api - .call("chainHead_unstable_unpin", ["invalid_sub_id", &invalid_hash]) + .call("chainHead_unstable_unpin", rpc_params!["invalid_sub_id", &invalid_hash]) .await .unwrap(); // Valid subscription with invalid block hash. let invalid_hash = hex_string(&INVALID_HASH); let err = api - .call::<_, serde_json::Value>("chainHead_unstable_unpin", [&sub_id, &invalid_hash]) + .call::<_, serde_json::Value>( + "chainHead_unstable_unpin", + rpc_params![&sub_id, &invalid_hash], + ) .await .unwrap_err(); assert_matches!(err, @@ -1606,7 +1609,10 @@ async fn follow_with_unpin() { ); // To not exceed the number of pinned blocks, we need to unpin before the next import. 
- let _res: () = api.call("chainHead_unstable_unpin", [&sub_id, &block_hash]).await.unwrap(); + let _res: () = api + .call("chainHead_unstable_unpin", rpc_params![&sub_id, &block_hash]) + .await + .unwrap(); // Block tree: // finalized_block -> block -> block2 @@ -1645,6 +1651,160 @@ async fn follow_with_unpin() { assert!(sub.next::>().await.is_none()); } +#[tokio::test] +async fn follow_with_multiple_unpin_hashes() { + let builder = TestClientBuilder::new(); + let backend = builder.backend(); + let mut client = Arc::new(builder.build()); + + let api = ChainHead::new( + client.clone(), + backend, + Arc::new(TaskExecutor::default()), + CHAIN_GENESIS, + ChainHeadConfig { + global_max_pinned_blocks: MAX_PINNED_BLOCKS, + subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), + subscription_max_ongoing_operations: MAX_OPERATIONS, + operation_max_storage_items: MAX_PAGINATION_LIMIT, + }, + ) + .into_rpc(); + + let mut sub = api.subscribe("chainHead_unstable_follow", [false]).await.unwrap(); + let sub_id = sub.subscription_id(); + let sub_id = serde_json::to_string(&sub_id).unwrap(); + + // Import 3 blocks. + let block_1 = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; + let block_1_hash = block_1.header.hash(); + client.import(BlockOrigin::Own, block_1.clone()).await.unwrap(); + + let block_2 = BlockBuilderBuilder::new(&*client) + .on_parent_block(block_1.hash()) + .with_parent_block_number(1) + .build() + .unwrap() + .build() + .unwrap() + .block; + let block_2_hash = block_2.header.hash(); + client.import(BlockOrigin::Own, block_2.clone()).await.unwrap(); + + let block_3 = BlockBuilderBuilder::new(&*client) + .on_parent_block(block_2.hash()) + .with_parent_block_number(2) + .build() + .unwrap() + .build() + .unwrap() + .block; + let block_3_hash = block_3.header.hash(); + client.import(BlockOrigin::Own, block_3.clone()).await.unwrap(); + + // Ensure the imported block is propagated and pinned for this subscription. + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::Initialized(_) + ); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::NewBlock(_) + ); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::BestBlockChanged(_) + ); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::NewBlock(_) + ); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::BestBlockChanged(_) + ); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::NewBlock(_) + ); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::BestBlockChanged(_) + ); + + // Unpin an invalid subscription ID must return Ok(()). + let invalid_hash = hex_string(&INVALID_HASH); + let _res: () = api + .call("chainHead_unstable_unpin", rpc_params!["invalid_sub_id", &invalid_hash]) + .await + .unwrap(); + + // Valid subscription with invalid block hash. + let err = api + .call::<_, serde_json::Value>( + "chainHead_unstable_unpin", + rpc_params![&sub_id, &invalid_hash], + ) + .await + .unwrap_err(); + assert_matches!(err, + Error::Call(CallError::Custom(ref err)) if err.code() == 2001 && err.message() == "Invalid block hash" + ); + + let _res: () = api + .call("chainHead_unstable_unpin", rpc_params![&sub_id, &block_1_hash]) + .await + .unwrap(); + + // One block hash is invalid. Block 1 is already unpinned. 
+ let err = api + .call::<_, serde_json::Value>( + "chainHead_unstable_unpin", + rpc_params![&sub_id, vec![&block_1_hash, &block_2_hash, &block_3_hash]], + ) + .await + .unwrap_err(); + assert_matches!(err, + Error::Call(CallError::Custom(ref err)) if err.code() == 2001 && err.message() == "Invalid block hash" + ); + + // Unpin multiple blocks. + let _res: () = api + .call("chainHead_unstable_unpin", rpc_params![&sub_id, vec![&block_2_hash, &block_3_hash]]) + .await + .unwrap(); + + // Check block 2 and 3 are unpinned. + let err = api + .call::<_, serde_json::Value>( + "chainHead_unstable_unpin", + rpc_params![&sub_id, &block_2_hash], + ) + .await + .unwrap_err(); + assert_matches!(err, + Error::Call(CallError::Custom(ref err)) if err.code() == 2001 && err.message() == "Invalid block hash" + ); + + let err = api + .call::<_, serde_json::Value>( + "chainHead_unstable_unpin", + rpc_params![&sub_id, &block_3_hash], + ) + .await + .unwrap_err(); + assert_matches!(err, + Error::Call(CallError::Custom(ref err)) if err.code() == 2001 && err.message() == "Invalid block hash" + ); +} + #[tokio::test] async fn follow_prune_best_block() { let builder = TestClientBuilder::new(); @@ -1828,7 +1988,7 @@ async fn follow_prune_best_block() { let sub_id = sub.subscription_id(); let sub_id = serde_json::to_string(&sub_id).unwrap(); let hash = format!("{:?}", block_2_hash); - let _res: () = api.call("chainHead_unstable_unpin", [&sub_id, &hash]).await.unwrap(); + let _res: () = api.call("chainHead_unstable_unpin", rpc_params![&sub_id, &hash]).await.unwrap(); } #[tokio::test] @@ -2305,7 +2465,10 @@ async fn pin_block_references() { wait_pinned_references(&backend, &hash, 1).await; // To not exceed the number of pinned blocks, we need to unpin before the next import. - let _res: () = api.call("chainHead_unstable_unpin", [&sub_id, &block_hash]).await.unwrap(); + let _res: () = api + .call("chainHead_unstable_unpin", rpc_params![&sub_id, &block_hash]) + .await + .unwrap(); // Make sure unpin clears out the reference. let refs = backend.pin_refs(&hash).unwrap(); -- GitLab From cd38ccff7ff49b8f00bbaf799b2056c97ee6e0e9 Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Tue, 14 Nov 2023 16:54:54 +0200 Subject: [PATCH 24/74] chainHead: Remove `chainHead_genesis` method (#2296) The method has been removed from the spec (https://github.com/paritytech/json-rpc-interface-spec/tree/main/src), this PR keeps the `chainHead` in sync with that change. @paritytech/subxt-team --------- Signed-off-by: Alexandru Vasile --- .../client/rpc-spec-v2/src/chain_head/api.rs | 8 ---- .../rpc-spec-v2/src/chain_head/chain_head.rs | 11 +---- .../rpc-spec-v2/src/chain_head/tests.rs | 44 +------------------ substrate/client/service/src/builder.rs | 1 - 4 files changed, 2 insertions(+), 62 deletions(-) diff --git a/substrate/client/rpc-spec-v2/src/chain_head/api.rs b/substrate/client/rpc-spec-v2/src/chain_head/api.rs index 427b5499bc1..9ae80137955 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/api.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/api.rs @@ -74,14 +74,6 @@ pub trait ChainHeadApi { hash: Hash, ) -> RpcResult>; - /// Get the chain's genesis hash. - /// - /// # Unstable - /// - /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_unstable_genesisHash", blocking)] - fn chain_head_unstable_genesis_hash(&self) -> RpcResult; - /// Returns storage entries at a specific block's state. 
/// /// # Unstable diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs index 2d01c302037..866701a7dbf 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs @@ -107,8 +107,6 @@ pub struct ChainHead, Block: BlockT, Client> { executor: SubscriptionTaskExecutor, /// Keep track of the pinned blocks for each subscription. subscriptions: Arc>, - /// The hexadecimal encoded hash of the genesis block. - genesis_hash: String, /// The maximum number of items reported by the `chainHead_storage` before /// pagination is required. operation_max_storage_items: usize, @@ -118,14 +116,12 @@ pub struct ChainHead, Block: BlockT, Client> { impl, Block: BlockT, Client> ChainHead { /// Create a new [`ChainHead`]. - pub fn new>( + pub fn new( client: Arc, backend: Arc, executor: SubscriptionTaskExecutor, - genesis_hash: GenesisHash, config: ChainHeadConfig, ) -> Self { - let genesis_hash = hex_string(&genesis_hash.as_ref()); Self { client, backend: backend.clone(), @@ -137,7 +133,6 @@ impl, Block: BlockT, Client> ChainHead { backend, )), operation_max_storage_items: config.operation_max_storage_items, - genesis_hash, _phantom: PhantomData, } } @@ -315,10 +310,6 @@ where .map_err(Into::into) } - fn chain_head_unstable_genesis_hash(&self) -> RpcResult { - Ok(self.genesis_hash.clone()) - } - fn chain_head_unstable_storage( &self, follow_subscription: String, diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index 15b258c4756..11c6798bf0a 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -28,7 +28,7 @@ use futures::Future; use jsonrpsee::{ core::{error::Error, server::rpc_module::Subscription as RpcSubscription}, rpc_params, - types::{error::CallError, EmptyServerParams as EmptyParams}, + types::error::CallError, RpcModule, }; use sc_block_builder::BlockBuilderBuilder; @@ -61,7 +61,6 @@ const MAX_PINNED_BLOCKS: usize = 32; const MAX_PINNED_SECS: u64 = 60; const MAX_OPERATIONS: usize = 16; const MAX_PAGINATION_LIMIT: usize = 5; -const CHAIN_GENESIS: [u8; 32] = [0; 32]; const INVALID_HASH: [u8; 32] = [1; 32]; const KEY: &[u8] = b":mock"; const VALUE: &[u8] = b"hello world"; @@ -111,7 +110,6 @@ async fn setup_api() -> ( client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -162,7 +160,6 @@ async fn follow_subscription_produces_blocks() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -231,7 +228,6 @@ async fn follow_with_runtime() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -345,31 +341,6 @@ async fn follow_with_runtime() { assert_eq!(event, expected); } -#[tokio::test] -async fn get_genesis() { - let builder = TestClientBuilder::new(); - let backend = builder.backend(); - let client = Arc::new(builder.build()); - - let api = ChainHead::new( - client.clone(), - backend, - Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, 
- ChainHeadConfig { - global_max_pinned_blocks: MAX_PINNED_BLOCKS, - subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), - subscription_max_ongoing_operations: MAX_OPERATIONS, - operation_max_storage_items: MAX_PAGINATION_LIMIT, - }, - ) - .into_rpc(); - - let genesis: String = - api.call("chainHead_unstable_genesisHash", EmptyParams::new()).await.unwrap(); - assert_eq!(genesis, hex_string(&CHAIN_GENESIS)); -} - #[tokio::test] async fn get_header() { let (_client, api, _sub, sub_id, block) = setup_api().await; @@ -569,7 +540,6 @@ async fn call_runtime_without_flag() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -1228,7 +1198,6 @@ async fn separate_operation_ids_for_subscriptions() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -1316,7 +1285,6 @@ async fn follow_generates_initial_blocks() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -1472,7 +1440,6 @@ async fn follow_exceeding_pinned_blocks() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: 2, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -1549,7 +1516,6 @@ async fn follow_with_unpin() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: 2, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -1815,7 +1781,6 @@ async fn follow_prune_best_block() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -2001,7 +1966,6 @@ async fn follow_forks_pruned_block() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -2153,7 +2117,6 @@ async fn follow_report_multiple_pruned_block() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -2399,7 +2362,6 @@ async fn pin_block_references() { client.clone(), backend.clone(), Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: 3, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -2537,7 +2499,6 @@ async fn follow_finalized_before_new_block() { client_mock.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -2652,7 +2613,6 @@ async fn ensure_operation_limits_works() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -2757,7 +2717,6 
@@ async fn check_continue_operation() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -2940,7 +2899,6 @@ async fn stop_storage_operation() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs index 3838accde02..25f998385ba 100644 --- a/substrate/client/service/src/builder.rs +++ b/substrate/client/service/src/builder.rs @@ -637,7 +637,6 @@ where client.clone(), backend.clone(), task_executor.clone(), - client.info().genesis_hash, // Defaults to sensible limits for the `ChainHead`. sc_rpc_spec_v2::chain_head::ChainHeadConfig::default(), ) -- GitLab From 7d735fc8ae8c9dde906457e27c9d2694892adc8b Mon Sep 17 00:00:00 2001 From: georgepisaltu <52418509+georgepisaltu@users.noreply.github.com> Date: Tue, 14 Nov 2023 18:22:30 +0200 Subject: [PATCH 25/74] Add simple collator election mechanism (#1340) Fixes https://github.com/paritytech/polkadot-sdk/issues/106 Port of cumulus PR https://github.com/paritytech/cumulus/pull/2960 This PR adds the ability to bid for collator slots even after the max number of collators have already registered. This eliminates the first come, first served mechanism that was in place before. Key changes: - added `update_bond` extrinsic to allow registered candidates to adjust their bonds in order to dynamically control their bids - added `take_candidate_slot` extrinsic to try to replace an already existing candidate by bidding more than them - candidates are now kept in a sorted list in the pallet storage, where the top `DesiredCandidates` out of `MaxCandidates` candidates in the list will be selected by the session pallet as collators - if the candidacy bond is increased through a `set_candidacy_bond` call, candidates which don't meet the new bond requirements are kicked # Checklist - [ ] My PR includes a detailed description as outlined in the "Description" section above - [ ] My PR follows the [labeling requirements](https://github.com/paritytech/polkadot-sdk/blob/master/docs/CONTRIBUTING.md#process) of this project (at minimum one label for `T` required) - [ ] I have made corresponding changes to the documentation (if applicable) - [ ] I have added tests that prove my fix is effective or that my feature works (if applicable) - [ ] If this PR alters any external APIs or interfaces used by Polkadot, the corresponding Polkadot PR is ready as well as the corresponding Cumulus PR (optional) --------- Signed-off-by: georgepisaltu --- .../collator-selection/src/benchmarking.rs | 153 ++- cumulus/pallets/collator-selection/src/lib.rs | 437 ++++-- .../pallets/collator-selection/src/tests.rs | 1210 +++++++++++++++-- .../pallets/collator-selection/src/weights.rs | 36 +- .../src/weights/pallet_collator_selection.rs | 26 +- .../src/weights/pallet_collator_selection.rs | 26 +- .../src/weights/pallet_collator_selection.rs | 26 +- .../src/weights/pallet_collator_selection.rs | 26 +- .../src/weights/pallet_collator_selection.rs | 26 +- .../src/weights/pallet_collator_selection.rs | 26 +- .../src/weights/pallet_collator_selection.rs | 26 +- .../src/weights/pallet_collator_selection.rs | 26 +- .../src/weights/pallet_collator_selection.rs | 26 +- 13 files changed, 1819 
insertions(+), 251 deletions(-) diff --git a/cumulus/pallets/collator-selection/src/benchmarking.rs b/cumulus/pallets/collator-selection/src/benchmarking.rs index 49999dc114d..fa95303495d 100644 --- a/cumulus/pallets/collator-selection/src/benchmarking.rs +++ b/cumulus/pallets/collator-selection/src/benchmarking.rs @@ -25,14 +25,11 @@ use codec::Decode; use frame_benchmarking::{ account, impl_benchmark_test_suite, v2::*, whitelisted_caller, BenchmarkError, }; -use frame_support::{ - dispatch::DispatchResult, - traits::{Currency, EnsureOrigin, Get, ReservableCurrency}, -}; +use frame_support::traits::{Currency, EnsureOrigin, Get, ReservableCurrency}; use frame_system::{pallet_prelude::BlockNumberFor, EventRecord, RawOrigin}; use pallet_authorship::EventHandler; use pallet_session::{self as session, SessionManager}; -use sp_std::prelude::*; +use sp_std::{cmp, prelude::*}; pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; @@ -94,7 +91,7 @@ fn register_candidates(count: u32) { assert!(>::get() > 0u32.into(), "Bond cannot be zero!"); for who in candidates { - T::Currency::make_free_balance_be(&who, >::get() * 2u32.into()); + T::Currency::make_free_balance_be(&who, >::get() * 3u32.into()); >::register_as_candidate(RawOrigin::Signed(who).into()).unwrap(); } } @@ -107,8 +104,11 @@ fn min_candidates() -> u32 { fn min_invulnerables() -> u32 { let min_collators = T::MinEligibleCollators::get(); - let candidates_length = >::get().len(); - min_collators.saturating_sub(candidates_length.try_into().unwrap()) + let candidates_length = >::decode_len() + .unwrap_or_default() + .try_into() + .unwrap_or_default(); + min_collators.saturating_sub(candidates_length) } #[benchmarks(where T: pallet_authorship::Config + session::Config)] @@ -160,22 +160,19 @@ mod benchmarks { .unwrap(); } // ... and register them. 
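Several of the reworked benchmark helpers above (for example `min_invulnerables`) now size the candidate set with `CandidateList::<T>::decode_len()` instead of loading and decoding the whole bounded list. This is cheap because a SCALE-encoded `Vec`/`BoundedVec` begins with a compact length prefix, so the length can be read without decoding any element. A minimal sketch of that idea, assuming only the `parity-scale-codec` crate (the free-standing `encoded_len` helper is illustrative, not a pallet API):

```rust
use parity_scale_codec::{Compact, Decode, Encode};

/// Read only the compact length prefix of a SCALE-encoded `Vec<T>`,
/// without decoding any of its elements. `StorageValue::decode_len`
/// relies on this same prefix.
fn encoded_len(mut encoded: &[u8]) -> Option<usize> {
    <Compact<u32>>::decode(&mut encoded).ok().map(|c| c.0 as usize)
}

fn main() {
    // Pretend these pairs are `CandidateInfo` entries stored on chain.
    let candidates: Vec<(u64, u128)> = vec![(3, 10), (4, 20), (5, 30)];
    let encoded = candidates.encode();

    // The length is recovered from the prefix alone.
    assert_eq!(encoded_len(&encoded), Some(3));
}
```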
- for (who, _) in candidates { + for (who, _) in candidates.iter() { let deposit = >::get(); - T::Currency::make_free_balance_be(&who, deposit * 1000_u32.into()); - let incoming = CandidateInfo { who: who.clone(), deposit }; - >::try_mutate(|candidates| -> DispatchResult { - if !candidates.iter().any(|candidate| candidate.who == who) { - T::Currency::reserve(&who, deposit)?; - candidates.try_push(incoming).expect("we've respected the bounded vec limit"); - >::insert( - who.clone(), - frame_system::Pallet::::block_number() + T::KickThreshold::get(), - ); - } - Ok(()) + T::Currency::make_free_balance_be(who, deposit * 1000_u32.into()); + >::try_mutate(|list| { + list.try_push(CandidateInfo { who: who.clone(), deposit }).unwrap(); + Ok::<(), BenchmarkError>(()) }) - .expect("only returns ok"); + .unwrap(); + T::Currency::reserve(who, deposit)?; + >::insert( + who.clone(), + frame_system::Pallet::::block_number() + T::KickThreshold::get(), + ); } // now we need to fill up invulnerables @@ -226,10 +223,27 @@ mod benchmarks { } #[benchmark] - fn set_candidacy_bond() -> Result<(), BenchmarkError> { - let bond_amount: BalanceOf = T::Currency::minimum_balance() * 10u32.into(); + fn set_candidacy_bond( + c: Linear<0, { T::MaxCandidates::get() }>, + k: Linear<0, { T::MaxCandidates::get() }>, + ) -> Result<(), BenchmarkError> { + let initial_bond_amount: BalanceOf = T::Currency::minimum_balance() * 2u32.into(); + >::put(initial_bond_amount); + register_validators::(c); + register_candidates::(c); + let kicked = cmp::min(k, c); let origin = T::UpdateOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + let bond_amount = if k > 0 { + >::mutate(|candidates| { + for info in candidates.iter_mut().skip(kicked as usize) { + info.deposit = T::Currency::minimum_balance() * 3u32.into(); + } + }); + T::Currency::minimum_balance() * 3u32.into() + } else { + T::Currency::minimum_balance() + }; #[extrinsic_call] _(origin as T::RuntimeOrigin, bond_amount); @@ -238,6 +252,35 @@ mod benchmarks { Ok(()) } + #[benchmark] + fn update_bond( + c: Linear<{ min_candidates::() + 1 }, { T::MaxCandidates::get() }>, + ) -> Result<(), BenchmarkError> { + >::put(T::Currency::minimum_balance()); + >::put(c); + + register_validators::(c); + register_candidates::(c); + + let caller = >::get()[0].who.clone(); + v2::whitelist!(caller); + + let bond_amount: BalanceOf = + T::Currency::minimum_balance() + T::Currency::minimum_balance(); + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), bond_amount); + + assert_last_event::( + Event::CandidateBondUpdated { account_id: caller, deposit: bond_amount }.into(), + ); + assert!( + >::get().iter().last().unwrap().deposit == + T::Currency::minimum_balance() * 2u32.into() + ); + Ok(()) + } + // worse case is when we have all the max-candidate slots filled except one, and we fill that // one. 
#[benchmark] @@ -267,6 +310,36 @@ mod benchmarks { ); } + #[benchmark] + fn take_candidate_slot(c: Linear<{ min_candidates::() + 1 }, { T::MaxCandidates::get() }>) { + >::put(T::Currency::minimum_balance()); + >::put(1); + + register_validators::(c); + register_candidates::(c); + + let caller: T::AccountId = whitelisted_caller(); + let bond: BalanceOf = T::Currency::minimum_balance() * 10u32.into(); + T::Currency::make_free_balance_be(&caller, bond); + + >::set_keys( + RawOrigin::Signed(caller.clone()).into(), + keys::(c + 1), + Vec::new(), + ) + .unwrap(); + + let target = >::get().iter().last().unwrap().who.clone(); + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), bond / 2u32.into(), target.clone()); + + assert_last_event::( + Event::CandidateReplaced { old: target, new: caller, deposit: bond / 2u32.into() } + .into(), + ); + } + // worse case is the last candidate leaving. #[benchmark] fn leave_intent(c: Linear<{ min_candidates::() + 1 }, { T::MaxCandidates::get() }>) { @@ -276,7 +349,7 @@ mod benchmarks { register_validators::(c); register_candidates::(c); - let leaving = >::get().last().unwrap().who.clone(); + let leaving = >::get().iter().last().unwrap().who.clone(); v2::whitelist!(leaving); #[extrinsic_call] @@ -323,31 +396,37 @@ mod benchmarks { let new_block: BlockNumberFor = 1800u32.into(); let zero_block: BlockNumberFor = 0u32.into(); - let candidates = >::get(); + let candidates: Vec = >::get() + .iter() + .map(|candidate_info| candidate_info.who.clone()) + .collect(); let non_removals = c.saturating_sub(r); for i in 0..c { - >::insert(candidates[i as usize].who.clone(), zero_block); + >::insert(candidates[i as usize].clone(), zero_block); } if non_removals > 0 { for i in 0..non_removals { - >::insert(candidates[i as usize].who.clone(), new_block); + >::insert(candidates[i as usize].clone(), new_block); } } else { for i in 0..c { - >::insert(candidates[i as usize].who.clone(), new_block); + >::insert(candidates[i as usize].clone(), new_block); } } let min_candidates = min_candidates::(); - let pre_length = >::get().len(); + let pre_length = >::decode_len().unwrap_or_default(); frame_system::Pallet::::set_block_number(new_block); - assert!(>::get().len() == c as usize); - + let current_length: u32 = >::decode_len() + .unwrap_or_default() + .try_into() + .unwrap_or_default(); + assert!(c == current_length); #[block] { as SessionManager<_>>::new_session(0); @@ -357,16 +436,20 @@ mod benchmarks { // candidates > removals and remaining candidates > min candidates // => remaining candidates should be shorter than before removal, i.e. some were // actually removed. - assert!(>::get().len() < pre_length); + assert!(>::decode_len().unwrap_or_default() < pre_length); } else if c > r && non_removals < min_candidates { // candidates > removals and remaining candidates would be less than min candidates // => remaining candidates should equal min candidates, i.e. some were removed up to // the minimum, but then any more were "forced" to stay in candidates. 
- assert!(>::get().len() == min_candidates as usize); + let current_length: u32 = >::decode_len() + .unwrap_or_default() + .try_into() + .unwrap_or_default(); + assert!(min_candidates == current_length); } else { // removals >= candidates, non removals must == 0 // can't remove more than exist - assert!(>::get().len() == pre_length); + assert!(>::decode_len().unwrap_or_default() == pre_length); } } diff --git a/cumulus/pallets/collator-selection/src/lib.rs b/cumulus/pallets/collator-selection/src/lib.rs index 24493ce9d9c..7449f4d68c7 100644 --- a/cumulus/pallets/collator-selection/src/lib.rs +++ b/cumulus/pallets/collator-selection/src/lib.rs @@ -35,16 +35,36 @@ //! //! 1. [`Invulnerables`]: a set of collators appointed by governance. These accounts will always be //! collators. -//! 2. [`Candidates`]: these are *candidates to the collation task* and may or may not be elected as -//! a final collator. +//! 2. [`CandidateList`]: these are *candidates to the collation task* and may or may not be elected +//! as a final collator. //! -//! The current implementation resolves congestion of [`Candidates`] in a first-come-first-serve -//! manner. +//! The current implementation resolves congestion of [`CandidateList`] through a simple auction +//! mechanism. Candidates bid for the collator slots and at the end of the session, the auction ends +//! and the top candidates are selected to become collators. The number of selected candidates is +//! determined by the value of `DesiredCandidates`. +//! +//! Before the list reaches full capacity, candidates can register by placing the minimum bond +//! through `register_as_candidate`. Then, if an account wants to participate in the collator slot +//! auction, they have to replace an existing candidate by placing a greater deposit through +//! `take_candidate_slot`. Existing candidates can increase their bids through `update_bond`. +//! +//! At any point, an account can take the place of another account in the candidate list if they put +//! up a greater deposit than the target. While new joiners would like to deposit as little as +//! possible to participate in the auction, the replacement threat incentivizes candidates to bid as +//! close to their budget as possible in order to avoid being replaced. +//! +//! Candidates which are not on "winning" slots in the list can also decrease their deposits through +//! `update_bond`, but candidates who are on top slots and try to decrease their deposits will fail +//! in order to enforce auction mechanics and have meaningful bids. //! //! Candidates will not be allowed to get kicked or `leave_intent` if the total number of collators //! would fall below `MinEligibleCollators`. This is to ensure that some collators will always //! exist, i.e. someone is eligible to produce a block. //! +//! When a new session starts, candidates with the highest deposits will be selected in order until +//! the desired number of collators is reached. Candidates can increase or decrease their deposits +//! between sessions in order to ensure they receive a slot in the collator list. +//! //! ### Rewards //! //! The Collator Selection pallet maintains an on-chain account (the "Pot"). In each block, the @@ -56,8 +76,8 @@ //! //! To initiate rewards, an ED needs to be transferred to the pot address. //! -//! Note: Eventually the Pot distribution may be modified as discussed in -//! [this issue](https://github.com/paritytech/statemint/issues/21#issuecomment-810481073). +//! 
Note: Eventually the Pot distribution may be modified as discussed in [this +//! issue](https://github.com/paritytech/statemint/issues/21#issuecomment-810481073). #![cfg_attr(not(feature = "std"), no_std)] @@ -182,9 +202,12 @@ pub mod pallet { /// The (community, limited) collation candidates. `Candidates` and `Invulnerables` should be /// mutually exclusive. + /// + /// This list is sorted in ascending order by deposit and when the deposits are equal, the least + /// recently updated is considered greater. #[pallet::storage] - #[pallet::getter(fn candidates)] - pub type Candidates = StorageValue< + #[pallet::getter(fn candidate_list)] + pub type CandidateList = StorageValue< _, BoundedVec>, T::MaxCandidates>, ValueQuery, @@ -261,8 +284,12 @@ pub mod pallet { NewCandidacyBond { bond_amount: BalanceOf }, /// A new candidate joined. CandidateAdded { account_id: T::AccountId, deposit: BalanceOf }, + /// Bond of a candidate updated. + CandidateBondUpdated { account_id: T::AccountId, deposit: BalanceOf }, /// A candidate was removed. CandidateRemoved { account_id: T::AccountId }, + /// An account was replaced in the candidate list by another one. + CandidateReplaced { old: T::AccountId, new: T::AccountId, deposit: BalanceOf }, /// An account was unable to be added to the Invulnerables because they did not have keys /// registered. Other Invulnerables may have been set. InvalidInvulnerableSkipped { account_id: T::AccountId }, @@ -288,12 +315,38 @@ pub mod pallet { NoAssociatedValidatorId, /// Validator ID is not yet registered. ValidatorNotRegistered, + /// Could not insert in the candidate list. + InsertToCandidateListFailed, + /// Could not remove from the candidate list. + RemoveFromCandidateListFailed, + /// New deposit amount would be below the minimum candidacy bond. + DepositTooLow, + /// Could not update the candidate list. + UpdateCandidateListFailed, + /// Deposit amount is too low to take the target's slot in the candidate list. + InsufficientBond, + /// The target account to be replaced in the candidate list is not a candidate. + TargetIsNotCandidate, + /// The updated deposit amount is equal to the amount already reserved. + IdenticalDeposit, + /// Cannot lower candidacy bond while occupying a future collator slot in the list. + InvalidUnreserve, } #[pallet::hooks] impl Hooks> for Pallet { fn integrity_test() { assert!(T::MinEligibleCollators::get() > 0, "chain must require at least one collator"); + assert!( + T::MaxInvulnerables::get().saturating_add(T::MaxCandidates::get()) >= + T::MinEligibleCollators::get(), + "invulnerables and candidates must be able to satisfy collator demand" + ); + } + + #[cfg(feature = "try-runtime")] + fn try_state(_: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { + Self::do_try_state() } } @@ -307,8 +360,8 @@ pub mod pallet { /// acceptable Invulnerables, and is not proposing a _set_ of new Invulnerables. /// /// This call does not maintain mutual exclusivity of `Invulnerables` and `Candidates`. It - /// is recommended to use a batch of `add_invulnerable` and `remove_invulnerable` instead. - /// A `batch_all` can also be used to enforce atomicity. If any candidates are included in + /// is recommended to use a batch of `add_invulnerable` and `remove_invulnerable` instead. A + /// `batch_all` can also be used to enforce atomicity. If any candidates are included in /// `new`, they should be removed with `remove_invulnerable_candidate` after execution. /// /// Must be called by the `UpdateOrigin`. 
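The documentation above describes `CandidateList` as kept in ascending order of deposit, with equal deposits ordered so that the least recently updated entry is considered greater, and with collators drawn from the top of the list at session boundaries. A minimal, self-contained model of that ordering using plain `(who, deposit)` pairs (the `insert_bid` and `assemble` helpers are illustrative, not pallet APIs; currency reservations and session keys are omitted):

```rust
type Who = u32;
type Balance = u128;

/// Insert a bid keeping the list sorted by deposit, ascending.
/// Inserting *before* the first entry with an equal-or-greater deposit means
/// a newer bid of the same amount ends up below older ones, i.e. the least
/// recently updated entry is treated as greater.
fn insert_bid(list: &mut Vec<(Who, Balance)>, who: Who, deposit: Balance) {
    let pos = list
        .iter()
        .position(|&(_, d)| d >= deposit)
        .unwrap_or(list.len());
    list.insert(pos, (who, deposit));
}

/// The collator set is the invulnerables plus the top `desired` bidders,
/// read from the end of the ascending list.
fn assemble(invulnerables: &[Who], list: &[(Who, Balance)], desired: usize) -> Vec<Who> {
    let mut collators = invulnerables.to_vec();
    collators.extend(list.iter().rev().take(desired).map(|&(who, _)| who));
    collators
}

fn main() {
    let mut list = Vec::new();
    insert_bid(&mut list, 3, 10);
    insert_bid(&mut list, 4, 10); // same deposit as 3, ranks below it
    insert_bid(&mut list, 5, 30);
    assert_eq!(list, vec![(4, 10), (3, 10), (5, 30)]);

    // With two desired slots, the two highest-ranked bids join the invulnerables.
    assert_eq!(assemble(&[1, 2], &list, 2), vec![1, 2, 5, 3]);
}
```

The insertion rule is what keeps long-standing candidates from being displaced by newcomers who merely match their deposit: to win a slot, a later bidder has to bid strictly more.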
@@ -319,8 +372,9 @@ pub mod pallet { // don't wipe out the collator set if new.is_empty() { + // Casting `u32` to `usize` should be safe on all machines running this. ensure!( - Candidates::::decode_len().unwrap_or_default() >= + CandidateList::::decode_len().unwrap_or_default() >= T::MinEligibleCollators::get() as usize, Error::::TooFewEligibleCollators ); @@ -401,17 +455,47 @@ pub mod pallet { /// Set the candidacy bond amount. /// + /// If the candidacy bond is increased by this call, all current candidates which have a + /// deposit lower than the new bond will be kicked from the list and get their deposits + /// back. + /// /// The origin for this call must be the `UpdateOrigin`. #[pallet::call_index(2)] - #[pallet::weight(T::WeightInfo::set_candidacy_bond())] + #[pallet::weight(T::WeightInfo::set_candidacy_bond( + T::MaxCandidates::get(), + T::MaxCandidates::get() + ))] pub fn set_candidacy_bond( origin: OriginFor, bond: BalanceOf, ) -> DispatchResultWithPostInfo { T::UpdateOrigin::ensure_origin(origin)?; - >::put(bond); + let bond_increased = >::mutate(|old_bond| -> bool { + let bond_increased = *old_bond < bond; + *old_bond = bond; + bond_increased + }); + let initial_len = >::decode_len().unwrap_or_default(); + let kicked = (bond_increased && initial_len > 0) + .then(|| { + // Closure below returns the number of candidates which were kicked because + // their deposits were lower than the new candidacy bond. + >::mutate(|candidates| -> usize { + let first_safe_candidate = candidates + .iter() + .position(|candidate| candidate.deposit >= bond) + .unwrap_or(initial_len); + let kicked_candidates = candidates.drain(..first_safe_candidate); + for candidate in kicked_candidates { + T::Currency::unreserve(&candidate.who, candidate.deposit); + >::remove(candidate.who); + } + first_safe_candidate + }) + }) + .unwrap_or_default(); Self::deposit_event(Event::NewCandidacyBond { bond_amount: bond }); - Ok(().into()) + Ok(Some(T::WeightInfo::set_candidacy_bond(initial_len as u32, kicked as u32)).into()) } /// Register this account as a collator candidate. The account must (a) already have @@ -424,8 +508,11 @@ pub mod pallet { let who = ensure_signed(origin)?; // ensure we are below limit. - let length = >::decode_len().unwrap_or_default(); - ensure!((length as u32) < Self::desired_candidates(), Error::::TooManyCandidates); + let length: u32 = >::decode_len() + .unwrap_or_default() + .try_into() + .unwrap_or_default(); + ensure!(length < T::MaxCandidates::get(), Error::::TooManyCandidates); ensure!(!Self::invulnerables().contains(&who), Error::::AlreadyInvulnerable); let validator_key = T::ValidatorIdOf::convert(who.clone()) @@ -437,25 +524,27 @@ pub mod pallet { let deposit = Self::candidacy_bond(); // First authored block is current block plus kick threshold to handle session delay - let incoming = CandidateInfo { who: who.clone(), deposit }; - - let current_count = - >::try_mutate(|candidates| -> Result { - if candidates.iter().any(|candidate| candidate.who == who) { - Err(Error::::AlreadyCandidate)? 
- } else { - T::Currency::reserve(&who, deposit)?; - candidates.try_push(incoming).map_err(|_| Error::::TooManyCandidates)?; - >::insert( - who.clone(), - frame_system::Pallet::::block_number() + T::KickThreshold::get(), - ); - Ok(candidates.len()) - } - })?; + >::try_mutate(|candidates| -> Result<(), DispatchError> { + ensure!( + !candidates.iter().any(|candidate_info| candidate_info.who == who), + Error::::AlreadyCandidate + ); + T::Currency::reserve(&who, deposit)?; + >::insert( + who.clone(), + frame_system::Pallet::::block_number() + T::KickThreshold::get(), + ); + candidates + .try_insert(0, CandidateInfo { who: who.clone(), deposit }) + .map_err(|_| Error::::InsertToCandidateListFailed)?; + Ok(()) + })?; Self::deposit_event(Event::CandidateAdded { account_id: who, deposit }); - Ok(Some(T::WeightInfo::register_as_candidate(current_count as u32)).into()) + // Safe to do unchecked add here because we ensure above that `length < + // T::MaxCandidates::get()`, and since `T::MaxCandidates` is `u32` it can be at most + // `u32::MAX`, therefore `length + 1` cannot overflow. + Ok(Some(T::WeightInfo::register_as_candidate(length + 1)).into()) } /// Deregister `origin` as a collator candidate. Note that the collator can only leave on @@ -468,13 +557,14 @@ pub mod pallet { pub fn leave_intent(origin: OriginFor) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; ensure!( - Self::eligible_collators() > T::MinEligibleCollators::get() as usize, + Self::eligible_collators() > T::MinEligibleCollators::get(), Error::::TooFewEligibleCollators ); + let length = >::decode_len().unwrap_or_default(); // Do remove their last authored block. - let current_count = Self::try_remove_candidate(&who, true)?; + Self::try_remove_candidate(&who, true)?; - Ok(Some(T::WeightInfo::leave_intent(current_count as u32)).into()) + Ok(Some(T::WeightInfo::leave_intent(length.saturating_sub(1) as u32)).into()) } /// Add a new account `who` to the list of `Invulnerables` collators. `who` must have @@ -521,7 +611,7 @@ pub mod pallet { .unwrap_or_default() .try_into() .unwrap_or(T::MaxInvulnerables::get().saturating_sub(1)), - Candidates::::decode_len() + >::decode_len() .unwrap_or_default() .try_into() .unwrap_or(T::MaxCandidates::get()), @@ -540,7 +630,7 @@ pub mod pallet { T::UpdateOrigin::ensure_origin(origin)?; ensure!( - Self::eligible_collators() > T::MinEligibleCollators::get() as usize, + Self::eligible_collators() > T::MinEligibleCollators::get(), Error::::TooFewEligibleCollators ); @@ -554,6 +644,154 @@ pub mod pallet { Self::deposit_event(Event::InvulnerableRemoved { account_id: who }); Ok(()) } + + /// Update the candidacy bond of collator candidate `origin` to a new amount `new_deposit`. + /// + /// Setting a `new_deposit` that is lower than the current deposit while `origin` is + /// occupying a top-`DesiredCandidates` slot is not allowed. + /// + /// This call will fail if `origin` is not a collator candidate, the updated bond is lower + /// than the minimum candidacy bond, and/or the amount cannot be reserved. + #[pallet::call_index(7)] + #[pallet::weight(T::WeightInfo::update_bond(T::MaxCandidates::get()))] + pub fn update_bond( + origin: OriginFor, + new_deposit: BalanceOf, + ) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + ensure!(new_deposit >= >::get(), Error::::DepositTooLow); + // The function below will try to mutate the `CandidateList` entry for the caller to + // update their deposit to the new value of `new_deposit`. 
The return value is the + // position of the entry in the list, used for weight calculation. + let length = + >::try_mutate(|candidates| -> Result { + let idx = candidates + .iter() + .position(|candidate_info| candidate_info.who == who) + .ok_or_else(|| Error::::NotCandidate)?; + let candidate_count = candidates.len(); + // Remove the candidate from the list. + let mut info = candidates.remove(idx); + let old_deposit = info.deposit; + if new_deposit > old_deposit { + T::Currency::reserve(&who, new_deposit - old_deposit)?; + } else if new_deposit < old_deposit { + // Casting `u32` to `usize` should be safe on all machines running this. + ensure!( + idx.saturating_add(>::get() as usize) < + candidate_count, + Error::::InvalidUnreserve + ); + T::Currency::unreserve(&who, old_deposit - new_deposit); + } else { + return Err(Error::::IdenticalDeposit.into()) + } + + // Update the deposit and insert the candidate in the correct spot in the list. + info.deposit = new_deposit; + let new_pos = candidates + .iter() + .position(|candidate| candidate.deposit >= new_deposit) + .unwrap_or_else(|| candidates.len()); + candidates + .try_insert(new_pos, info) + .map_err(|_| Error::::InsertToCandidateListFailed)?; + + Ok(candidate_count) + })?; + + Self::deposit_event(Event::CandidateBondUpdated { + account_id: who, + deposit: new_deposit, + }); + Ok(Some(T::WeightInfo::update_bond(length as u32)).into()) + } + + /// The caller `origin` replaces a candidate `target` in the collator candidate list by + /// reserving `deposit`. The amount `deposit` reserved by the caller must be greater than + /// the existing bond of the target it is trying to replace. + /// + /// This call will fail if the caller is already a collator candidate or invulnerable, the + /// caller does not have registered session keys, the target is not a collator candidate, + /// and/or the `deposit` amount cannot be reserved. + #[pallet::call_index(8)] + #[pallet::weight(T::WeightInfo::take_candidate_slot(T::MaxCandidates::get()))] + pub fn take_candidate_slot( + origin: OriginFor, + deposit: BalanceOf, + target: T::AccountId, + ) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + + ensure!(!Self::invulnerables().contains(&who), Error::::AlreadyInvulnerable); + ensure!(deposit >= Self::candidacy_bond(), Error::::InsufficientBond); + + let validator_key = T::ValidatorIdOf::convert(who.clone()) + .ok_or(Error::::NoAssociatedValidatorId)?; + ensure!( + T::ValidatorRegistration::is_registered(&validator_key), + Error::::ValidatorNotRegistered + ); + + let length = >::decode_len().unwrap_or_default(); + // The closure below iterates through all elements of the candidate list to ensure that + // the caller isn't already a candidate and to find the target it's trying to replace in + // the list. The return value is a tuple of the position of the candidate to be replaced + // in the list along with its candidate information. + let target_info = >::try_mutate( + |candidates| -> Result>, DispatchError> { + // Find the position in the list of the candidate that is being replaced. + let mut target_info_idx = None; + let mut new_info_idx = None; + for (idx, candidate_info) in candidates.iter().enumerate() { + // While iterating through the candidates trying to find the target, + // also ensure on the same pass that our caller isn't already a + // candidate. 
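The `update_bond` body above removes the caller's entry, settles only the difference in reserved balance, and re-inserts the entry at its new sorted position; lowering the bid is rejected with `InvalidUnreserve` while the caller occupies one of the top `DesiredCandidates` slots. A simplified model of that flow with plain `(who, deposit)` pairs (the `update_bid` helper is illustrative; the minimum-bond check and the actual reserve/unreserve calls are omitted):

```rust
type Who = u32;
type Balance = u128;

/// Move `who`'s bid to `new_deposit`, keeping the list sorted ascending.
/// Lowering the bid is refused while `who` sits in one of the top `desired`
/// positions, mirroring the pallet's `InvalidUnreserve` error.
fn update_bid(
    list: &mut Vec<(Who, Balance)>,
    who: Who,
    new_deposit: Balance,
    desired: usize,
) -> Result<(), &'static str> {
    let idx = list
        .iter()
        .position(|&(w, _)| w == who)
        .ok_or("NotCandidate")?;
    let (_, old_deposit) = list[idx];
    if new_deposit < old_deposit && idx + desired >= list.len() {
        return Err("InvalidUnreserve");
    }
    if new_deposit == old_deposit {
        return Err("IdenticalDeposit");
    }
    list.remove(idx);
    // The pallet reserves or unreserves the balance difference at this point.
    let pos = list
        .iter()
        .position(|&(_, d)| d >= new_deposit)
        .unwrap_or(list.len());
    list.insert(pos, (who, new_deposit));
    Ok(())
}

fn main() {
    // Ascending by deposit; with `desired = 2`, accounts 4 and 5 hold the
    // prospective collator slots.
    let mut list = vec![(3, 10), (4, 20), (5, 30)];

    // Raising a bid re-sorts the list.
    update_bid(&mut list, 3, 25, 2).unwrap();
    assert_eq!(list, vec![(4, 20), (3, 25), (5, 30)]);

    // 3 now occupies a top slot, so it cannot lower its bid again.
    assert_eq!(update_bid(&mut list, 3, 15, 2), Err("InvalidUnreserve"));

    // 4 is outside the top slots and may lower its bid.
    update_bid(&mut list, 4, 15, 2).unwrap();
    assert_eq!(list, vec![(4, 15), (3, 25), (5, 30)]);
}
```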
+ ensure!(candidate_info.who != who, Error::::AlreadyCandidate); + // If we find our target, update the position but do not stop the + // iteration since we're also checking that the caller isn't already a + // candidate. + if candidate_info.who == target { + target_info_idx = Some(idx); + } + // Find the spot where the new candidate would be inserted in the current + // version of the list. + if new_info_idx.is_none() && candidate_info.deposit >= deposit { + new_info_idx = Some(idx); + } + } + let target_info_idx = + target_info_idx.ok_or(Error::::TargetIsNotCandidate)?; + + // Remove the old candidate from the list. + let target_info = candidates.remove(target_info_idx); + ensure!(deposit > target_info.deposit, Error::::InsufficientBond); + + // We have removed one element before `new_info_idx`, so the position we have to + // insert to is reduced by 1. + let new_pos = new_info_idx + .map(|i| i.saturating_sub(1)) + .unwrap_or_else(|| candidates.len()); + let new_info = CandidateInfo { who: who.clone(), deposit }; + // Insert the new candidate in the correct spot in the list. + candidates + .try_insert(new_pos, new_info) + .expect("candidate count previously decremented; qed"); + + Ok(target_info) + }, + )?; + T::Currency::reserve(&who, deposit)?; + T::Currency::unreserve(&target_info.who, target_info.deposit); + >::remove(target_info.who.clone()); + >::insert( + who.clone(), + frame_system::Pallet::::block_number() + T::KickThreshold::get(), + ); + + Self::deposit_event(Event::CandidateReplaced { old: target, new: who, deposit }); + Ok(Some(T::WeightInfo::take_candidate_slot(length as u32)).into()) + } } impl Pallet { @@ -564,84 +802,122 @@ pub mod pallet { /// Return the total number of accounts that are eligible collators (candidates and /// invulnerables). - fn eligible_collators() -> usize { - Candidates::::decode_len() + fn eligible_collators() -> u32 { + >::decode_len() .unwrap_or_default() .saturating_add(Invulnerables::::decode_len().unwrap_or_default()) + .try_into() + .unwrap_or(u32::MAX) } /// Removes a candidate if they exist and sends them back their deposit. fn try_remove_candidate( who: &T::AccountId, remove_last_authored: bool, - ) -> Result { - let current_count = - >::try_mutate(|candidates| -> Result { - let index = candidates - .iter() - .position(|candidate| candidate.who == *who) - .ok_or(Error::::NotCandidate)?; - let candidate = candidates.remove(index); - T::Currency::unreserve(who, candidate.deposit); - if remove_last_authored { - >::remove(who.clone()) - }; - Ok(candidates.len()) - })?; + ) -> Result<(), DispatchError> { + >::try_mutate(|candidates| -> Result<(), DispatchError> { + let idx = candidates + .iter() + .position(|candidate_info| candidate_info.who == *who) + .ok_or(Error::::NotCandidate)?; + let deposit = candidates[idx].deposit; + T::Currency::unreserve(who, deposit); + candidates.remove(idx); + if remove_last_authored { + >::remove(who.clone()) + }; + Ok(()) + })?; Self::deposit_event(Event::CandidateRemoved { account_id: who.clone() }); - Ok(current_count) + Ok(()) } /// Assemble the current set of candidates and invulnerables into the next collator set. /// /// This is done on the fly, as frequent as we are told to do so, as the session manager. - pub fn assemble_collators( - candidates: BoundedVec, - ) -> Vec { + pub fn assemble_collators() -> Vec { + // Casting `u32` to `usize` should be safe on all machines running this. 
+ let desired_candidates = >::get() as usize; let mut collators = Self::invulnerables().to_vec(); - collators.extend(candidates); + collators.extend( + >::get() + .iter() + .rev() + .cloned() + .take(desired_candidates) + .map(|candidate_info| candidate_info.who), + ); collators } /// Kicks out candidates that did not produce a block in the kick threshold and refunds /// their deposits. - pub fn kick_stale_candidates( - candidates: BoundedVec>, T::MaxCandidates>, - ) -> BoundedVec { + /// + /// Return value is the number of candidates left in the list. + pub fn kick_stale_candidates(candidates: impl IntoIterator) -> u32 { let now = frame_system::Pallet::::block_number(); let kick_threshold = T::KickThreshold::get(); let min_collators = T::MinEligibleCollators::get(); candidates .into_iter() .filter_map(|c| { - let last_block = >::get(c.who.clone()); + let last_block = >::get(c.clone()); let since_last = now.saturating_sub(last_block); - let is_invulnerable = Self::invulnerables().contains(&c.who); + let is_invulnerable = Self::invulnerables().contains(&c); let is_lazy = since_last >= kick_threshold; if is_invulnerable { - // They are invulnerable. No reason for them to be in Candidates also. + // They are invulnerable. No reason for them to be in `CandidateList` also. // We don't even care about the min collators here, because an Account // should not be a collator twice. - let _ = Self::try_remove_candidate(&c.who, false); + let _ = Self::try_remove_candidate(&c, false); None } else { - if Self::eligible_collators() <= min_collators as usize || !is_lazy { + if Self::eligible_collators() <= min_collators || !is_lazy { // Either this is a good collator (not lazy) or we are at the minimum // that the system needs. They get to stay. - Some(c.who) + Some(c) } else { // This collator has not produced a block recently enough. Bye bye. - let _ = Self::try_remove_candidate(&c.who, true); + let _ = Self::try_remove_candidate(&c, true); None } } }) - .collect::>() + .count() .try_into() .expect("filter_map operation can't result in a bounded vec larger than its original; qed") } + + /// Ensure the correctness of the state of this pallet. + /// + /// This should be valid before or after each state transition of this pallet. + /// + /// # Invariants + /// + /// ## `DesiredCandidates` + /// + /// * The current desired candidate count should not exceed the candidate list capacity. + /// * The number of selected candidates together with the invulnerables must be greater than + /// or equal to the minimum number of eligible collators. + #[cfg(any(test, feature = "try-runtime"))] + pub fn do_try_state() -> Result<(), sp_runtime::TryRuntimeError> { + let desired_candidates = >::get(); + + frame_support::ensure!( + desired_candidates <= T::MaxCandidates::get(), + "Shouldn't demand more candidates than the pallet config allows." + ); + + frame_support::ensure!( + desired_candidates.saturating_add(T::MaxInvulnerables::get()) >= + T::MinEligibleCollators::get(), + "Invulnerable set together with desired candidates should be able to meet the collator quota." 
+ ); + + Ok(()) + } } /// Keep track of number of authored blocks per authority, uncles are counted as well since @@ -677,14 +953,23 @@ pub mod pallet { >::block_number(), ); - let candidates = Self::candidates(); - let candidates_len_before = candidates.len(); - let active_candidates = Self::kick_stale_candidates(candidates); - let removed = candidates_len_before - active_candidates.len(); - let result = Self::assemble_collators(active_candidates); + // The `expect` below is safe because the list is a `BoundedVec` with a max size of + // `T::MaxCandidates`, which is a `u32`. When `decode_len` returns `Some(len)`, `len` + // must be valid and at most `u32::MAX`, which must always be able to convert to `u32`. + let candidates_len_before: u32 = >::decode_len() + .unwrap_or_default() + .try_into() + .expect("length is at most `T::MaxCandidates`, so it must fit in `u32`; qed"); + let active_candidates_count = Self::kick_stale_candidates( + >::get() + .iter() + .map(|candidate_info| candidate_info.who.clone()), + ); + let removed = candidates_len_before.saturating_sub(active_candidates_count); + let result = Self::assemble_collators(); frame_system::Pallet::::register_extra_weight_unchecked( - T::WeightInfo::new_session(candidates_len_before as u32, removed as u32), + T::WeightInfo::new_session(candidates_len_before, removed), DispatchClass::Mandatory, ); Some(result) diff --git a/cumulus/pallets/collator-selection/src/tests.rs b/cumulus/pallets/collator-selection/src/tests.rs index d4dae513df3..ed2044ccdfa 100644 --- a/cumulus/pallets/collator-selection/src/tests.rs +++ b/cumulus/pallets/collator-selection/src/tests.rs @@ -28,7 +28,7 @@ fn basic_setup_works() { assert_eq!(CollatorSelection::desired_candidates(), 2); assert_eq!(CollatorSelection::candidacy_bond(), 10); - assert!(CollatorSelection::candidates().is_empty()); + assert_eq!(>::get().iter().count(), 0); // genesis should sort input assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); }); @@ -202,7 +202,8 @@ fn candidate_to_invulnerable_works() { initialize_to_block(1); assert_eq!(CollatorSelection::desired_candidates(), 2); assert_eq!(CollatorSelection::candidacy_bond(), 10); - assert_eq!(CollatorSelection::candidates(), Vec::new()); + + assert_eq!(>::get().iter().count(), 0); assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); assert_eq!(Balances::free_balance(3), 100); @@ -226,7 +227,7 @@ fn candidate_to_invulnerable_works() { )); assert!(CollatorSelection::invulnerables().to_vec().contains(&3)); assert_eq!(Balances::free_balance(3), 100); - assert_eq!(CollatorSelection::candidates().len(), 1); + assert_eq!(>::get().iter().count(), 1); assert_ok!(CollatorSelection::add_invulnerable( RuntimeOrigin::signed(RootAccount::get()), @@ -240,7 +241,8 @@ fn candidate_to_invulnerable_works() { )); assert!(CollatorSelection::invulnerables().to_vec().contains(&4)); assert_eq!(Balances::free_balance(4), 100); - assert_eq!(CollatorSelection::candidates().len(), 0); + + assert_eq!(>::get().iter().count(), 0); }); } @@ -266,42 +268,230 @@ fn set_desired_candidates_works() { } #[test] -fn set_candidacy_bond() { +fn set_candidacy_bond_empty_candidate_list() { new_test_ext().execute_with(|| { // given assert_eq!(CollatorSelection::candidacy_bond(), 10); + assert!(>::get().is_empty()); - // can set + // can decrease without candidates assert_ok!(CollatorSelection::set_candidacy_bond( RuntimeOrigin::signed(RootAccount::get()), 7 )); assert_eq!(CollatorSelection::candidacy_bond(), 7); + assert!(>::get().is_empty()); // rejects bad origin. 
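At a session boundary, `new_session` above first kicks candidates that have not authored a block within `KickThreshold` (refunding their deposits) while never letting the eligible set fall below `MinEligibleCollators`, and then appends the highest remaining bids to the invulnerables as shown in the earlier list sketch. A simplified sketch of the kick pass (plain integers for blocks and balances; `kick_stale` and its parameters are illustrative, and the removal of invulnerables from the candidate list is omitted):

```rust
use std::collections::BTreeMap;

type Who = u32;
type Balance = u128;
type Block = u64;

/// Drop candidates that have been idle for at least `kick_threshold` blocks,
/// but never reduce the eligible set (candidates + invulnerables) below
/// `min_eligible`. Returns the kicked accounts (to be refunded).
fn kick_stale(
    candidates: &mut Vec<(Who, Balance)>,
    last_authored: &BTreeMap<Who, Block>,
    invulnerables: &[Who],
    now: Block,
    kick_threshold: Block,
    min_eligible: usize,
) -> Vec<Who> {
    let mut kicked = Vec::new();
    // Iterate over a snapshot so `candidates` can be mutated while checking.
    for (who, _) in candidates.clone() {
        let last = last_authored.get(&who).copied().unwrap_or(0);
        let lazy = now.saturating_sub(last) >= kick_threshold;
        let eligible = candidates.len() + invulnerables.len();
        if lazy && eligible > min_eligible {
            candidates.retain(|&(c, _)| c != who);
            kicked.push(who);
        }
    }
    kicked
}

fn main() {
    let invulnerables = vec![1, 2];
    // Ascending by deposit: 3 bid the least, 5 the most.
    let mut candidates = vec![(3, 10), (4, 20), (5, 30)];
    let last_authored = BTreeMap::from([(3, 0), (4, 95), (5, 99)]);

    // At block 100 with a threshold of 10, only 3 is stale.
    let kicked = kick_stale(&mut candidates, &last_authored, &invulnerables, 100, 10, 1);
    assert_eq!(kicked, vec![3]);
    assert_eq!(candidates, vec![(4, 20), (5, 30)]);
}
```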
assert_noop!(CollatorSelection::set_candidacy_bond(RuntimeOrigin::signed(1), 8), BadOrigin); + + // can increase without candidates + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 20 + )); + assert!(>::get().is_empty()); + assert_eq!(CollatorSelection::candidacy_bond(), 20); }); } #[test] -fn cannot_register_candidate_if_too_many() { +fn set_candidacy_bond_with_one_candidate() { new_test_ext().execute_with(|| { - // reset desired candidates: - >::put(0); + // given + assert_eq!(CollatorSelection::candidacy_bond(), 10); + assert!(>::get().is_empty()); - // can't accept anyone anymore. - assert_noop!( - CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3)), - Error::::TooManyCandidates, + let candidate_3 = CandidateInfo { who: 3, deposit: 10 }; + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_eq!(>::get(), vec![candidate_3.clone()]); + + // can decrease with one candidate + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 7 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 7); + assert_eq!(>::get(), vec![candidate_3.clone()]); + + // can increase up to initial deposit + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 10 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + assert_eq!(>::get(), vec![candidate_3.clone()]); + + // can increase past initial deposit, should kick existing candidate + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 20 + )); + assert!(>::get().is_empty()); + }); +} + +#[test] +fn set_candidacy_bond_with_many_candidates_same_deposit() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::candidacy_bond(), 10); + assert!(>::get().is_empty()); + + let candidate_3 = CandidateInfo { who: 3, deposit: 10 }; + let candidate_4 = CandidateInfo { who: 4, deposit: 10 }; + let candidate_5 = CandidateInfo { who: 5, deposit: 10 }; + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + assert_eq!( + >::get(), + vec![candidate_5.clone(), candidate_4.clone(), candidate_3.clone()] ); - // reset desired candidates: - >::put(1); + // can decrease with multiple candidates + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 7 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 7); + assert_eq!( + >::get(), + vec![candidate_5.clone(), candidate_4.clone(), candidate_3.clone()] + ); + + // can increase up to initial deposit + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 10 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + assert_eq!( + >::get(), + vec![candidate_5.clone(), candidate_4.clone(), candidate_3.clone()] + ); + + // can increase past initial deposit, should kick existing candidates + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 20 + )); + assert!(>::get().is_empty()); + }); +} + +#[test] +fn set_candidacy_bond_with_many_candidates_different_deposits() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::candidacy_bond(), 10); + assert!(>::get().is_empty()); + + let candidate_3 = CandidateInfo { who: 3, 
deposit: 10 }; + let candidate_4 = CandidateInfo { who: 4, deposit: 20 }; + let candidate_5 = CandidateInfo { who: 5, deposit: 30 }; + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 30)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 20)); + assert_eq!( + >::get(), + vec![candidate_3.clone(), candidate_4.clone(), candidate_5.clone()] + ); + + // can decrease with multiple candidates + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 7 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 7); + assert_eq!( + >::get(), + vec![candidate_3.clone(), candidate_4.clone(), candidate_5.clone()] + ); + // can increase up to initial deposit + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 10 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + assert_eq!( + >::get(), + vec![candidate_3.clone(), candidate_4.clone(), candidate_5.clone()] + ); + + // can increase to 4's deposit, should kick 3 + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 20 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 20); + assert_eq!( + >::get(), + vec![candidate_4.clone(), candidate_5.clone()] + ); + + // can increase past 4's deposit, should kick 4 + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 25 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 25); + assert_eq!(>::get(), vec![candidate_5.clone()]); + + // lowering the minimum deposit should have no effect + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 5 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 5); + assert_eq!(>::get(), vec![candidate_5.clone()]); + + // add 3 and 4 back but with higher deposits than minimum + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 10)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 20)); + assert_eq!( + >::get(), + vec![candidate_3.clone(), candidate_4.clone(), candidate_5.clone()] + ); + + // can increase the deposit above the current max in the list, all candidates should be + // kicked + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 40 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 40); + assert!(>::get().is_empty()); + }); +} + +#[test] +fn cannot_register_candidate_if_too_many() { + new_test_ext().execute_with(|| { + >::put(1); + + // MaxCandidates: u32 = 20 + // Aside from 3, 4, and 5, create enough accounts to have 21 potential + // candidates. 
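As the surrounding tests exercise, raising the candidacy bond kicks every candidate whose deposit is now below the new bond, which in an ascending list is simply a drain from the front; lowering the bond leaves the list untouched. A small sketch of that drain with plain `(who, deposit)` pairs (the `kick_below_bond` helper is illustrative; unreserving the refunded deposits is omitted):

```rust
type Who = u32;
type Balance = u128;

/// Given a list sorted ascending by deposit, drop everyone below `new_bond`.
/// Returns the kicked entries so their deposits can be refunded.
fn kick_below_bond(list: &mut Vec<(Who, Balance)>, new_bond: Balance) -> Vec<(Who, Balance)> {
    let first_safe = list
        .iter()
        .position(|&(_, deposit)| deposit >= new_bond)
        .unwrap_or(list.len());
    list.drain(..first_safe).collect()
}

fn main() {
    // Mirrors `set_candidacy_bond_with_many_candidates_different_deposits`:
    // deposits 10, 20 and 30, bond raised to 20.
    let mut list = vec![(3, 10), (4, 20), (5, 30)];
    let kicked = kick_below_bond(&mut list, 20);
    assert_eq!(kicked, vec![(3, 10)]);
    assert_eq!(list, vec![(4, 20), (5, 30)]);

    // Raising past everyone's deposit empties the list.
    assert_eq!(kick_below_bond(&mut list, 40).len(), 2);
    assert!(list.is_empty());
}
```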
+ for i in 6..=23 { + Balances::make_free_balance_be(&i, 100); + let key = MockSessionKeys { aura: UintAuthorityId(i) }; + Session::set_keys(RuntimeOrigin::signed(i).into(), key, Vec::new()).unwrap(); + } + + for c in 3..=22 { + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(c))); + } - // but no more assert_noop!( - CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5)), + CollatorSelection::register_as_candidate(RuntimeOrigin::signed(23)), Error::::TooManyCandidates, ); }) @@ -310,7 +500,7 @@ fn cannot_register_candidate_if_too_many() { #[test] fn cannot_unregister_candidate_if_too_few() { new_test_ext().execute_with(|| { - assert_eq!(CollatorSelection::candidates(), Vec::new()); + assert_eq!(>::get().iter().count(), 0); assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); assert_ok!(CollatorSelection::remove_invulnerable( RuntimeOrigin::signed(RootAccount::get()), @@ -368,8 +558,12 @@ fn cannot_register_dupe_candidate() { new_test_ext().execute_with(|| { // can add 3 as candidate assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + // tuple of (id, deposit). let addition = CandidateInfo { who: 3, deposit: 10 }; - assert_eq!(CollatorSelection::candidates(), vec![addition]); + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![addition] + ); assert_eq!(CollatorSelection::last_authored_block(3), 10); assert_eq!(Balances::free_balance(3), 90); @@ -404,7 +598,8 @@ fn register_as_candidate_works() { // given assert_eq!(CollatorSelection::desired_candidates(), 2); assert_eq!(CollatorSelection::candidacy_bond(), 10); - assert_eq!(CollatorSelection::candidates(), Vec::new()); + + assert_eq!(>::get().iter().count(), 0); assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); // take two endowed, non-invulnerables accounts. @@ -417,140 +612,888 @@ fn register_as_candidate_works() { assert_eq!(Balances::free_balance(3), 90); assert_eq!(Balances::free_balance(4), 90); - assert_eq!(CollatorSelection::candidates().len(), 2); + assert_eq!(>::get().iter().count(), 2); }); } #[test] -fn leave_intent() { +fn cannot_take_candidate_slot_if_invulnerable() { new_test_ext().execute_with(|| { - // register a candidate. - assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); - assert_eq!(Balances::free_balance(3), 90); - - // register too so can leave above min candidates - assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); - assert_eq!(Balances::free_balance(5), 90); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); - // cannot leave if not candidate. + // can't 1 because it is invulnerable. 
assert_noop!( - CollatorSelection::leave_intent(RuntimeOrigin::signed(4)), - Error::::NotCandidate + CollatorSelection::take_candidate_slot(RuntimeOrigin::signed(1), 50u64.into(), 2), + Error::::AlreadyInvulnerable, ); - - // bond is returned - assert_ok!(CollatorSelection::leave_intent(RuntimeOrigin::signed(3))); - assert_eq!(Balances::free_balance(3), 100); - assert_eq!(CollatorSelection::last_authored_block(3), 0); - }); + }) } #[test] -fn authorship_event_handler() { +fn cannot_take_candidate_slot_if_keys_not_registered() { new_test_ext().execute_with(|| { - // put 100 in the pot + 5 for ED - Balances::make_free_balance_be(&CollatorSelection::account_id(), 105); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_noop!( + CollatorSelection::take_candidate_slot(RuntimeOrigin::signed(42), 50u64.into(), 3), + Error::::ValidatorNotRegistered + ); + }) +} - // 4 is the default author. - assert_eq!(Balances::free_balance(4), 100); +#[test] +fn cannot_take_candidate_slot_if_duplicate() { + new_test_ext().execute_with(|| { + // can add 3 as candidate + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); - // triggers `note_author` - Authorship::on_initialize(1); + // tuple of (id, deposit). + let candidate_3 = CandidateInfo { who: 3, deposit: 10 }; + let candidate_4 = CandidateInfo { who: 4, deposit: 10 }; + let actual_candidates = + >::get().iter().cloned().collect::>(); + assert_eq!(actual_candidates, vec![candidate_4, candidate_3]); + assert_eq!(CollatorSelection::last_authored_block(3), 10); + assert_eq!(CollatorSelection::last_authored_block(4), 10); + assert_eq!(Balances::free_balance(3), 90); - let collator = CandidateInfo { who: 4, deposit: 10 }; + // but no more + assert_noop!( + CollatorSelection::take_candidate_slot(RuntimeOrigin::signed(3), 50u64.into(), 4), + Error::::AlreadyCandidate, + ); + }) +} - assert_eq!(CollatorSelection::candidates(), vec![collator]); - assert_eq!(CollatorSelection::last_authored_block(4), 0); +#[test] +fn cannot_take_candidate_slot_if_target_invalid() { + new_test_ext().execute_with(|| { + // can add 3 as candidate + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + // tuple of (id, deposit). + let candidate_3 = CandidateInfo { who: 3, deposit: 10 }; + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![candidate_3] + ); + assert_eq!(CollatorSelection::last_authored_block(3), 10); + assert_eq!(Balances::free_balance(3), 90); + assert_eq!(Balances::free_balance(4), 100); - // half of the pot goes to the collator who's the author (4 in tests). - assert_eq!(Balances::free_balance(4), 140); - // half + ED stays. - assert_eq!(Balances::free_balance(CollatorSelection::account_id()), 55); - }); + assert_noop!( + CollatorSelection::take_candidate_slot(RuntimeOrigin::signed(4), 50u64.into(), 5), + Error::::TargetIsNotCandidate, + ); + }) } #[test] -fn fees_edgecases() { +fn cannot_take_candidate_slot_if_poor() { new_test_ext().execute_with(|| { - // Nothing panics, no reward when no ED in balance - Authorship::on_initialize(1); - // put some money into the pot at ED - Balances::make_free_balance_be(&CollatorSelection::account_id(), 5); - // 4 is the default author. 
- assert_eq!(Balances::free_balance(4), 100); assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); - // triggers `note_author` - Authorship::on_initialize(1); + assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(33), 0); - let collator = CandidateInfo { who: 4, deposit: 10 }; + // works + assert_ok!(CollatorSelection::take_candidate_slot( + RuntimeOrigin::signed(3), + 20u64.into(), + 4 + )); - assert_eq!(CollatorSelection::candidates(), vec![collator]); - assert_eq!(CollatorSelection::last_authored_block(4), 0); - // Nothing received - assert_eq!(Balances::free_balance(4), 90); - // all fee stays - assert_eq!(Balances::free_balance(CollatorSelection::account_id()), 5); + // poor + assert_noop!( + CollatorSelection::take_candidate_slot(RuntimeOrigin::signed(33), 30u64.into(), 3), + BalancesError::::InsufficientBalance, + ); }); } #[test] -fn session_management_works() { +fn cannot_take_candidate_slot_if_insufficient_deposit() { new_test_ext().execute_with(|| { - initialize_to_block(1); - - assert_eq!(SessionChangeBlock::get(), 0); - assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); - - initialize_to_block(4); - - assert_eq!(SessionChangeBlock::get(), 0); - assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); - - // add a new collator assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); - - // session won't see this. - assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); - // but we have a new candidate. - assert_eq!(CollatorSelection::candidates().len(), 1); - - initialize_to_block(10); - assert_eq!(SessionChangeBlock::get(), 10); - // pallet-session has 1 session delay; current validators are the same. - assert_eq!(Session::validators(), vec![1, 2]); - // queued ones are changed, and now we have 3. - assert_eq!(Session::queued_keys().len(), 3); - // session handlers (aura, et. al.) cannot see this yet. - assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); - - initialize_to_block(20); - assert_eq!(SessionChangeBlock::get(), 20); - // changed are now reflected to session handlers. 
- assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 3]); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 60u64.into())); + assert_eq!(Balances::free_balance(3), 40); + assert_eq!(Balances::free_balance(4), 100); + assert_noop!( + CollatorSelection::take_candidate_slot(RuntimeOrigin::signed(4), 5u64.into(), 3), + Error::::InsufficientBond, + ); + assert_eq!(Balances::free_balance(3), 40); + assert_eq!(Balances::free_balance(4), 100); }); } #[test] -fn kick_mechanism() { +fn cannot_take_candidate_slot_if_deposit_less_than_target() { new_test_ext().execute_with(|| { - // add a new collator assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); - assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); - initialize_to_block(10); - assert_eq!(CollatorSelection::candidates().len(), 2); - initialize_to_block(20); - assert_eq!(SessionChangeBlock::get(), 20); - // 4 authored this block, gets to stay 3 was kicked - assert_eq!(CollatorSelection::candidates().len(), 1); - // 3 will be kicked after 1 session delay - assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 3, 4]); - let collator = CandidateInfo { who: 4, deposit: 10 }; - assert_eq!(CollatorSelection::candidates(), vec![collator]); - assert_eq!(CollatorSelection::last_authored_block(4), 20); - initialize_to_block(30); - // 3 gets kicked after 1 session delay - assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 4]); - // kicked collator gets funds back + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 60u64.into())); + assert_eq!(Balances::free_balance(3), 40); + assert_eq!(Balances::free_balance(4), 100); + assert_noop!( + CollatorSelection::take_candidate_slot(RuntimeOrigin::signed(4), 20u64.into(), 3), + Error::::InsufficientBond, + ); + assert_eq!(Balances::free_balance(3), 40); + assert_eq!(Balances::free_balance(4), 100); + }); +} + +#[test] +fn take_candidate_slot_works() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + // take two endowed, non-invulnerables accounts. + assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(4), 100); + assert_eq!(Balances::free_balance(5), 100); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_eq!(Balances::free_balance(3), 90); + assert_eq!(Balances::free_balance(4), 90); + assert_eq!(Balances::free_balance(5), 90); + + assert_eq!(>::get().iter().count(), 3); + + Balances::make_free_balance_be(&6, 100); + let key = MockSessionKeys { aura: UintAuthorityId(6) }; + Session::set_keys(RuntimeOrigin::signed(6).into(), key, Vec::new()).unwrap(); + + assert_ok!(CollatorSelection::take_candidate_slot( + RuntimeOrigin::signed(6), + 50u64.into(), + 4 + )); + + assert_eq!(Balances::free_balance(3), 90); + assert_eq!(Balances::free_balance(4), 100); + assert_eq!(Balances::free_balance(5), 90); + assert_eq!(Balances::free_balance(6), 50); + + // tuple of (id, deposit). 
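`take_candidate_slot`, exercised in the test above, lets a non-candidate replace an existing candidate by putting up strictly more than the target's deposit; the target is refunded and the newcomer lands at its sorted position. The sketch below replays the `take_candidate_slot_works` scenario with plain `(who, deposit)` pairs (the `take_slot` helper is illustrative; the candidacy-bond minimum, session-key and invulnerable checks are omitted):

```rust
type Who = u32;
type Balance = u128;

/// Replace `target` with `new_who` bidding `deposit`, keeping the list sorted
/// ascending by deposit. Fails if the target is missing or not outbid.
fn take_slot(
    list: &mut Vec<(Who, Balance)>,
    new_who: Who,
    deposit: Balance,
    target: Who,
) -> Result<(), &'static str> {
    if list.iter().any(|&(who, _)| who == new_who) {
        return Err("AlreadyCandidate");
    }
    let idx = list
        .iter()
        .position(|&(who, _)| who == target)
        .ok_or("TargetIsNotCandidate")?;
    let (_, target_deposit) = list.remove(idx);
    if deposit <= target_deposit {
        // Restore the target; the bid was not strictly greater.
        list.insert(idx, (target, target_deposit));
        return Err("InsufficientBond");
    }
    let pos = list
        .iter()
        .position(|&(_, d)| d >= deposit)
        .unwrap_or(list.len());
    list.insert(pos, (new_who, deposit));
    Ok(())
}

fn main() {
    // As in `take_candidate_slot_works`: 3, 4 and 5 registered with deposit 10,
    // with the newest registrations at the front of the ascending list.
    let mut list = vec![(5, 10), (4, 10), (3, 10)];
    take_slot(&mut list, 6, 50, 4).unwrap();
    assert_eq!(list, vec![(5, 10), (3, 10), (6, 50)]);

    // Bidding no more than the target's deposit is rejected.
    assert_eq!(take_slot(&mut list, 7, 10, 3), Err("InsufficientBond"));
}
```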
+ let candidate_3 = CandidateInfo { who: 3, deposit: 10 }; + let candidate_6 = CandidateInfo { who: 6, deposit: 50 }; + let candidate_5 = CandidateInfo { who: 5, deposit: 10 }; + let mut actual_candidates = + >::get().iter().cloned().collect::>(); + actual_candidates.sort_by(|info_1, info_2| info_1.deposit.cmp(&info_2.deposit)); + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![candidate_5, candidate_3, candidate_6] + ); + }); +} + +#[test] +fn increase_candidacy_bond_non_candidate_account() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(5), 20), + Error::::NotCandidate + ); + }); +} + +#[test] +fn increase_candidacy_bond_insufficient_balance() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + // take two endowed, non-invulnerables accounts. + assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(4), 100); + assert_eq!(Balances::free_balance(5), 100); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_eq!(Balances::free_balance(3), 90); + assert_eq!(Balances::free_balance(4), 90); + assert_eq!(Balances::free_balance(5), 90); + + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(3), 110), + BalancesError::::InsufficientBalance + ); + + assert_eq!(Balances::free_balance(3), 90); + }); +} + +#[test] +fn increase_candidacy_bond_works() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + // take three endowed, non-invulnerables accounts. 
+ assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(4), 100); + assert_eq!(Balances::free_balance(5), 100); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_eq!(Balances::free_balance(3), 90); + assert_eq!(Balances::free_balance(4), 90); + assert_eq!(Balances::free_balance(5), 90); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 20)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 30)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 40)); + + assert_eq!(>::get().iter().count(), 3); + assert_eq!(Balances::free_balance(3), 80); + assert_eq!(Balances::free_balance(4), 70); + assert_eq!(Balances::free_balance(5), 60); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 40)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 60)); + + assert_eq!(>::get().iter().count(), 3); + assert_eq!(Balances::free_balance(3), 60); + assert_eq!(Balances::free_balance(4), 40); + assert_eq!(Balances::free_balance(5), 60); + }); +} + +#[test] +fn decrease_candidacy_bond_non_candidate_account() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + + assert_eq!(Balances::free_balance(5), 100); + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(5), 10), + Error::::NotCandidate + ); + assert_eq!(Balances::free_balance(5), 100); + }); +} + +#[test] +fn decrease_candidacy_bond_insufficient_funds() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + // take two endowed, non-invulnerables accounts. 
+ assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(4), 100); + assert_eq!(Balances::free_balance(5), 100); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 60)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 60)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 60)); + + assert_eq!(Balances::free_balance(3), 40); + assert_eq!(Balances::free_balance(4), 40); + assert_eq!(Balances::free_balance(5), 40); + + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(3), 0), + Error::::DepositTooLow + ); + + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(4), 5), + Error::::DepositTooLow + ); + + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(5), 9), + Error::::DepositTooLow + ); + + assert_eq!(Balances::free_balance(3), 40); + assert_eq!(Balances::free_balance(4), 40); + assert_eq!(Balances::free_balance(5), 40); + }); +} + +#[test] +fn decrease_candidacy_bond_occupying_top_slot() { + new_test_ext().execute_with(|| { + assert_eq!(CollatorSelection::desired_candidates(), 2); + // Register 3 candidates. + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + // And update their bids. + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 30u64.into())); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 30u64.into())); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 60u64.into())); + + // tuple of (id, deposit). + let candidate_3 = CandidateInfo { who: 3, deposit: 30 }; + let candidate_4 = CandidateInfo { who: 4, deposit: 30 }; + let candidate_5 = CandidateInfo { who: 5, deposit: 60 }; + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![candidate_4, candidate_3, candidate_5] + ); + + // Candidates 5 and 3 can't decrease their deposits because they are the 2 top candidates. + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(5), 29), + Error::::InvalidUnreserve, + ); + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(3), 29), + Error::::InvalidUnreserve, + ); + // But candidate 4 should have be able to decrease the deposit up to the minimum. + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 29u64.into())); + + // Make candidate 4 outbid candidate 3, taking their spot as the second highest bid. + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 35u64.into())); + + // tuple of (id, deposit). + let candidate_3 = CandidateInfo { who: 3, deposit: 30 }; + let candidate_4 = CandidateInfo { who: 4, deposit: 35 }; + let candidate_5 = CandidateInfo { who: 5, deposit: 60 }; + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![candidate_3, candidate_4, candidate_5] + ); + + // Now candidates 5 and 4 are the 2 top candidates, so they can't decrease their deposits. 
+ assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(5), 34), + Error::::InvalidUnreserve, + ); + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(4), 34), + Error::::InvalidUnreserve, + ); + // Candidate 3 should have be able to decrease the deposit up to the minimum now that + // they've fallen out of the top spots. + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 10u64.into())); + }); +} + +#[test] +fn decrease_candidacy_bond_works() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + // take three endowed, non-invulnerables accounts. + assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(4), 100); + assert_eq!(Balances::free_balance(5), 100); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_eq!(Balances::free_balance(3), 90); + assert_eq!(Balances::free_balance(4), 90); + assert_eq!(Balances::free_balance(5), 90); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 20)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 30)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 40)); + + assert_eq!(>::get().iter().count(), 3); + assert_eq!(Balances::free_balance(3), 80); + assert_eq!(Balances::free_balance(4), 70); + assert_eq!(Balances::free_balance(5), 60); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 10)); + + assert_eq!(>::get().iter().count(), 3); + assert_eq!(Balances::free_balance(3), 90); + assert_eq!(Balances::free_balance(4), 70); + assert_eq!(Balances::free_balance(5), 60); + }); +} + +#[test] +fn update_candidacy_bond_with_identical_amount() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + // take three endowed, non-invulnerables accounts. 
+ assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(4), 100); + assert_eq!(Balances::free_balance(5), 100); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_eq!(Balances::free_balance(3), 90); + assert_eq!(Balances::free_balance(4), 90); + assert_eq!(Balances::free_balance(5), 90); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 20)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 30)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 40)); + + assert_eq!(>::get().iter().count(), 3); + assert_eq!(Balances::free_balance(3), 80); + assert_eq!(Balances::free_balance(4), 70); + assert_eq!(Balances::free_balance(5), 60); + + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(3), 20), + Error::::IdenticalDeposit + ); + assert_eq!(Balances::free_balance(3), 80); + }); +} + +#[test] +fn candidate_list_works() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + // take three endowed, non-invulnerables accounts. + assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(4), 100); + assert_eq!(Balances::free_balance(5), 100); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 20)); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 30)); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 25)); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 10)); + + let candidate_3 = CandidateInfo { who: 3, deposit: 30 }; + let candidate_4 = CandidateInfo { who: 4, deposit: 25 }; + let candidate_5 = CandidateInfo { who: 5, deposit: 10 }; + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![candidate_5, candidate_4, candidate_3] + ); + }); +} + +#[test] +fn leave_intent() { + new_test_ext().execute_with(|| { + // register a candidate. + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_eq!(Balances::free_balance(3), 90); + + // register too so can leave above min candidates + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + assert_eq!(Balances::free_balance(5), 90); + + // cannot leave if not candidate. + assert_noop!( + CollatorSelection::leave_intent(RuntimeOrigin::signed(4)), + Error::::NotCandidate + ); + + // bond is returned + assert_ok!(CollatorSelection::leave_intent(RuntimeOrigin::signed(3))); + assert_eq!(Balances::free_balance(3), 100); + assert_eq!(CollatorSelection::last_authored_block(3), 0); + }); +} + +#[test] +fn authorship_event_handler() { + new_test_ext().execute_with(|| { + // put 100 in the pot + 5 for ED + Balances::make_free_balance_be(&CollatorSelection::account_id(), 105); + + // 4 is the default author. 
+ assert_eq!(Balances::free_balance(4), 100); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + // triggers `note_author` + Authorship::on_initialize(1); + + // tuple of (id, deposit). + let collator = CandidateInfo { who: 4, deposit: 10 }; + + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![collator] + ); + assert_eq!(CollatorSelection::last_authored_block(4), 0); + + // half of the pot goes to the collator who's the author (4 in tests). + assert_eq!(Balances::free_balance(4), 140); + // half + ED stays. + assert_eq!(Balances::free_balance(CollatorSelection::account_id()), 55); + }); +} + +#[test] +fn fees_edgecases() { + new_test_ext().execute_with(|| { + // Nothing panics, no reward when no ED in balance + Authorship::on_initialize(1); + // put some money into the pot at ED + Balances::make_free_balance_be(&CollatorSelection::account_id(), 5); + // 4 is the default author. + assert_eq!(Balances::free_balance(4), 100); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + // triggers `note_author` + Authorship::on_initialize(1); + + // tuple of (id, deposit). + let collator = CandidateInfo { who: 4, deposit: 10 }; + + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![collator] + ); + assert_eq!(CollatorSelection::last_authored_block(4), 0); + // Nothing received + assert_eq!(Balances::free_balance(4), 90); + // all fee stays + assert_eq!(Balances::free_balance(CollatorSelection::account_id()), 5); + }); +} + +#[test] +fn session_management_single_candidate() { + new_test_ext().execute_with(|| { + initialize_to_block(1); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(4); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + // add a new collator + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + + // session won't see this. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + // but we have a new candidate. + assert_eq!(>::get().iter().count(), 1); + + initialize_to_block(10); + assert_eq!(SessionChangeBlock::get(), 10); + // pallet-session has 1 session delay; current validators are the same. + assert_eq!(Session::validators(), vec![1, 2]); + // queued ones are changed, and now we have 3. + assert_eq!(Session::queued_keys().len(), 3); + // session handlers (aura, et. al.) cannot see this yet. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(20); + assert_eq!(SessionChangeBlock::get(), 20); + // changed are now reflected to session handlers. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 3]); + }); +} + +#[test] +fn session_management_max_candidates() { + new_test_ext().execute_with(|| { + initialize_to_block(1); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(4); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + // session won't see this. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + // but we have a new candidate. 
+ assert_eq!(>::get().iter().count(), 3); + + initialize_to_block(10); + assert_eq!(SessionChangeBlock::get(), 10); + // pallet-session has 1 session delay; current validators are the same. + assert_eq!(Session::validators(), vec![1, 2]); + // queued ones are changed, and now we have 4. + assert_eq!(Session::queued_keys().len(), 4); + // session handlers (aura, et. al.) cannot see this yet. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(20); + assert_eq!(SessionChangeBlock::get(), 20); + // changed are now reflected to session handlers. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 3, 4]); + }); +} + +#[test] +fn session_management_increase_bid_with_list_update() { + new_test_ext().execute_with(|| { + initialize_to_block(1); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(4); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 60)); + + // session won't see this. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + // but we have a new candidate. + assert_eq!(>::get().iter().count(), 3); + + initialize_to_block(10); + assert_eq!(SessionChangeBlock::get(), 10); + // pallet-session has 1 session delay; current validators are the same. + assert_eq!(Session::validators(), vec![1, 2]); + // queued ones are changed, and now we have 4. + assert_eq!(Session::queued_keys().len(), 4); + // session handlers (aura, et. al.) cannot see this yet. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(20); + assert_eq!(SessionChangeBlock::get(), 20); + // changed are now reflected to session handlers. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 5, 3]); + }); +} + +#[test] +fn session_management_candidate_list_eager_sort() { + new_test_ext().execute_with(|| { + initialize_to_block(1); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(4); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 60)); + + // session won't see this. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + // but we have a new candidate. + assert_eq!(>::get().iter().count(), 3); + + initialize_to_block(10); + assert_eq!(SessionChangeBlock::get(), 10); + // pallet-session has 1 session delay; current validators are the same. + assert_eq!(Session::validators(), vec![1, 2]); + // queued ones are changed, and now we have 4. + assert_eq!(Session::queued_keys().len(), 4); + // session handlers (aura, et. al.) cannot see this yet. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(20); + assert_eq!(SessionChangeBlock::get(), 20); + // changed are now reflected to session handlers. 
+ assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 5, 3]); + }); +} + +#[test] +fn session_management_reciprocal_outbidding() { + new_test_ext().execute_with(|| { + initialize_to_block(1); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(4); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 60)); + + initialize_to_block(5); + + // candidates 3 and 4 saw they were outbid and preemptively bid more + // than 5 in the next block. + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 70)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 70)); + + // session won't see this. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + // but we have a new candidate. + assert_eq!(>::get().iter().count(), 3); + + initialize_to_block(10); + assert_eq!(SessionChangeBlock::get(), 10); + // pallet-session has 1 session delay; current validators are the same. + assert_eq!(Session::validators(), vec![1, 2]); + // queued ones are changed, and now we have 4. + assert_eq!(Session::queued_keys().len(), 4); + // session handlers (aura, et. al.) cannot see this yet. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(20); + assert_eq!(SessionChangeBlock::get(), 20); + // changed are now reflected to session handlers. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 4, 3]); + }); +} + +#[test] +fn session_management_decrease_bid_after_auction() { + new_test_ext().execute_with(|| { + initialize_to_block(1); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(4); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 60)); + + initialize_to_block(5); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 70)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 70)); + + initialize_to_block(5); + + // candidate 5 saw it was outbid and wants to take back its bid, but + // not entirely so they still keep their place in the candidate list + // in case there is an opportunity in the future. + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 10)); + + // session won't see this. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + // but we have a new candidate. + assert_eq!(>::get().iter().count(), 3); + + initialize_to_block(10); + assert_eq!(SessionChangeBlock::get(), 10); + // pallet-session has 1 session delay; current validators are the same. + assert_eq!(Session::validators(), vec![1, 2]); + // queued ones are changed, and now we have 4. + assert_eq!(Session::queued_keys().len(), 4); + // session handlers (aura, et. al.) cannot see this yet. 
+ assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(20); + assert_eq!(SessionChangeBlock::get(), 20); + // changed are now reflected to session handlers. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 4, 3]); + }); +} + +#[test] +fn kick_mechanism() { + new_test_ext().execute_with(|| { + // add a new collator + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + initialize_to_block(10); + assert_eq!(>::get().iter().count(), 2); + initialize_to_block(20); + assert_eq!(SessionChangeBlock::get(), 20); + // 4 authored this block, gets to stay 3 was kicked + assert_eq!(>::get().iter().count(), 1); + // 3 will be kicked after 1 session delay + assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 3, 4]); + // tuple of (id, deposit). + let collator = CandidateInfo { who: 4, deposit: 10 }; + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![collator] + ); + assert_eq!(CollatorSelection::last_authored_block(4), 20); + initialize_to_block(30); + // 3 gets kicked after 1 session delay + assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 4]); + // kicked collator gets funds back assert_eq!(Balances::free_balance(3), 100); }); } @@ -559,7 +1502,8 @@ fn kick_mechanism() { fn should_not_kick_mechanism_too_few() { new_test_ext().execute_with(|| { // remove the invulnerables and add new collators 3 and 5 - assert_eq!(CollatorSelection::candidates(), Vec::new()); + + assert_eq!(>::get().iter().count(), 0); assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); assert_ok!(CollatorSelection::remove_invulnerable( RuntimeOrigin::signed(RootAccount::get()), @@ -573,30 +1517,34 @@ fn should_not_kick_mechanism_too_few() { )); initialize_to_block(10); - assert_eq!(CollatorSelection::candidates().len(), 2); + assert_eq!(>::get().iter().count(), 2); initialize_to_block(20); assert_eq!(SessionChangeBlock::get(), 20); // 4 authored this block, 3 is kicked, 5 stays because of too few collators - assert_eq!(CollatorSelection::candidates().len(), 1); + assert_eq!(>::get().iter().count(), 1); // 3 will be kicked after 1 session delay assert_eq!(SessionHandlerCollators::get(), vec![3, 5]); - let collator = CandidateInfo { who: 5, deposit: 10 }; - assert_eq!(CollatorSelection::candidates(), vec![collator]); + // tuple of (id, deposit). + let collator = CandidateInfo { who: 3, deposit: 10 }; + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![collator] + ); assert_eq!(CollatorSelection::last_authored_block(4), 20); initialize_to_block(30); // 3 gets kicked after 1 session delay - assert_eq!(SessionHandlerCollators::get(), vec![5]); + assert_eq!(SessionHandlerCollators::get(), vec![3]); // kicked collator gets funds back - assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(5), 100); }); } #[test] fn should_kick_invulnerables_from_candidates_on_session_change() { new_test_ext().execute_with(|| { - assert_eq!(CollatorSelection::candidates(), Vec::new()); + assert_eq!(>::get().iter().count(), 0); assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); assert_eq!(Balances::free_balance(3), 90); @@ -606,16 +1554,22 @@ fn should_kick_invulnerables_from_candidates_on_session_change() { vec![1, 2, 3] )); + // tuple of (id, deposit). 
let collator_3 = CandidateInfo { who: 3, deposit: 10 }; let collator_4 = CandidateInfo { who: 4, deposit: 10 }; - assert_eq!(CollatorSelection::candidates(), vec![collator_3, collator_4.clone()]); + let actual_candidates = + >::get().iter().cloned().collect::>(); + assert_eq!(actual_candidates, vec![collator_4.clone(), collator_3]); assert_eq!(CollatorSelection::invulnerables(), vec![1, 2, 3]); // session change initialize_to_block(10); // 3 is removed from candidates - assert_eq!(CollatorSelection::candidates(), vec![collator_4]); + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![collator_4] + ); // but not from invulnerables assert_eq!(CollatorSelection::invulnerables(), vec![1, 2, 3]); // and it got its deposit back diff --git a/cumulus/pallets/collator-selection/src/weights.rs b/cumulus/pallets/collator-selection/src/weights.rs index f8f86fb7dec..1c01ad6cd6f 100644 --- a/cumulus/pallets/collator-selection/src/weights.rs +++ b/cumulus/pallets/collator-selection/src/weights.rs @@ -30,9 +30,11 @@ pub trait WeightInfo { fn add_invulnerable(_b: u32, _c: u32) -> Weight; fn remove_invulnerable(_b: u32) -> Weight; fn set_desired_candidates() -> Weight; - fn set_candidacy_bond() -> Weight; + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight; fn register_as_candidate(_c: u32) -> Weight; fn leave_intent(_c: u32) -> Weight; + fn update_bond(_c: u32) -> Weight; + fn take_candidate_slot(_c: u32) -> Weight; fn note_author() -> Weight; fn new_session(_c: u32, _r: u32) -> Weight; } @@ -49,7 +51,7 @@ impl WeightInfo for SubstrateWeight { fn set_desired_candidates() -> Weight { Weight::from_parts(16_363_000_u64, 0).saturating_add(T::DbWeight::get().writes(1_u64)) } - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { Weight::from_parts(16_840_000_u64, 0).saturating_add(T::DbWeight::get().writes(1_u64)) } fn register_as_candidate(c: u32) -> Weight { @@ -66,6 +68,20 @@ impl WeightInfo for SubstrateWeight { .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } + fn update_bond(c: u32) -> Weight { + Weight::from_parts(55_336_000_u64, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(151_000_u64, 0).saturating_mul(c as u64)) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + fn take_candidate_slot(c: u32) -> Weight { + Weight::from_parts(71_196_000_u64, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(198_000_u64, 0).saturating_mul(c as u64)) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } fn note_author() -> Weight { Weight::from_parts(71_461_000_u64, 0) .saturating_add(T::DbWeight::get().reads(3_u64)) @@ -136,7 +152,7 @@ impl WeightInfo for () { fn set_desired_candidates() -> Weight { Weight::from_parts(16_363_000_u64, 0).saturating_add(RocksDbWeight::get().writes(1_u64)) } - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { Weight::from_parts(16_840_000_u64, 0).saturating_add(RocksDbWeight::get().writes(1_u64)) } fn register_as_candidate(c: u32) -> Weight { @@ -158,6 +174,20 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } + fn update_bond(c: u32) -> Weight { + Weight::from_parts(55_336_000_u64, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(151_000_u64, 0).saturating_mul(c as u64)) + 
.saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } + fn take_candidate_slot(c: u32) -> Weight { + Weight::from_parts(71_196_000_u64, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(198_000_u64, 0).saturating_mul(c as u64)) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } fn new_session(r: u32, c: u32) -> Weight { Weight::from_parts(0_u64, 0) // Standard Error: 1_010_000 diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_collator_selection.rs index 5c5a31eb348..c686bd6134a 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_collator_selection.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_collator_selection.rs @@ -123,7 +123,7 @@ impl pallet_collator_selection::WeightInfo for WeightIn } /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` @@ -177,6 +177,30 @@ impl pallet_collator_selection::WeightInfo for WeightIn .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. 
+ Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `System::BlockWeight` (r:1 w:1) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_collator_selection.rs index c33e79970ff..b3062984baf 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_collator_selection.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_collator_selection.rs @@ -121,7 +121,7 @@ impl pallet_collator_selection::WeightInfo for WeightIn } /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` @@ -175,6 +175,30 @@ impl pallet_collator_selection::WeightInfo for WeightIn .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. 
+ Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `System::BlockWeight` (r:1 w:1) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_collator_selection.rs index d98abbbc2d3..aeda7bbbb6a 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_collator_selection.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_collator_selection.rs @@ -124,7 +124,7 @@ impl pallet_collator_selection::WeightInfo for WeightIn } /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` @@ -178,6 +178,30 @@ impl pallet_collator_selection::WeightInfo for WeightIn .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. 
+ Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `System::BlockWeight` (r:1 w:1) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_collator_selection.rs index 095e784cf66..1fac2d59ab9 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_collator_selection.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_collator_selection.rs @@ -123,7 +123,7 @@ impl pallet_collator_selection::WeightInfo for WeightIn } /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` @@ -177,6 +177,30 @@ impl pallet_collator_selection::WeightInfo for WeightIn .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. 
+ Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `System::BlockWeight` (r:1 w:1) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_collator_selection.rs index cccb7c60924..72d8ba4045a 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_collator_selection.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_collator_selection.rs @@ -123,7 +123,7 @@ impl pallet_collator_selection::WeightInfo for WeightIn } /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` @@ -177,6 +177,30 @@ impl pallet_collator_selection::WeightInfo for WeightIn .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. 
+ Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `System::BlockWeight` (r:1 w:1) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_collator_selection.rs index 6ed2c429186..f7c78f7db82 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_collator_selection.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_collator_selection.rs @@ -123,7 +123,7 @@ impl pallet_collator_selection::WeightInfo for WeightIn } /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` @@ -177,6 +177,30 @@ impl pallet_collator_selection::WeightInfo for WeightIn .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. 
+ Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `System::BlockWeight` (r:1 w:1) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_collator_selection.rs index 1fb0b765c06..f7e233189ab 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_collator_selection.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_collator_selection.rs @@ -123,7 +123,7 @@ impl pallet_collator_selection::WeightInfo for WeightIn } /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` @@ -177,6 +177,30 @@ impl pallet_collator_selection::WeightInfo for WeightIn .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. 
+ Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `System::BlockWeight` (r:1 w:1) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_collator_selection.rs index 9cbfa6ce80e..9dcee77082b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_collator_selection.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_collator_selection.rs @@ -124,7 +124,7 @@ impl pallet_collator_selection::WeightInfo for WeightIn } /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` @@ -178,6 +178,30 @@ impl pallet_collator_selection::WeightInfo for WeightIn .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. 
+ Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `System::BlockWeight` (r:1 w:1) diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collator_selection.rs index 2c729e8dc10..03f3ff602a5 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collator_selection.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collator_selection.rs @@ -121,7 +121,7 @@ impl pallet_collator_selection::WeightInfo for WeightIn } /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` @@ -175,6 +175,30 @@ impl pallet_collator_selection::WeightInfo for WeightIn .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. 
+ Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `System::BlockWeight` (r:1 w:1) -- GitLab From 1cd38c2cd12fda156c755bcb9375e01a1e077bd8 Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Tue, 14 Nov 2023 18:24:45 +0100 Subject: [PATCH 26/74] Contracts: Bump contracts rococo (#2286) --- .../contracts/contracts-rococo/src/contracts.rs | 11 ++--------- .../runtimes/contracts/contracts-rococo/src/lib.rs | 2 +- 2 files changed, 3 insertions(+), 10 deletions(-) diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs index 1c99393d5e5..b86f797ee03 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs @@ -22,9 +22,7 @@ use frame_support::{ traits::{ConstBool, ConstU32, Nothing}, }; use pallet_contracts::{ - migration::{v12, v13, v14, v15}, - weights::SubstrateWeight, - Config, DebugInfo, DefaultAddressGenerator, Frame, Schedule, + weights::SubstrateWeight, Config, DebugInfo, DefaultAddressGenerator, Frame, Schedule, }; use sp_runtime::Perbill; @@ -70,12 +68,7 @@ impl Config for Runtime { type MaxDebugBufferLen = ConstU32<{ 2 * 1024 * 1024 }>; type MaxDelegateDependencies = ConstU32<32>; type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; - type Migrations = ( - v12::Migration, - v13::Migration, - v14::Migration, - v15::Migration, - ); + type Migrations = (); type RuntimeHoldReason = RuntimeHoldReason; type Debug = (); type Environment = (); diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index 2a2f4141033..4c66e780ba9 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -131,7 +131,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("contracts-rococo"), impl_name: create_runtime_str!("contracts-rococo"), authoring_version: 1, - spec_version: 10000, + spec_version: 10001, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, -- GitLab From 3a87390b30929dffdf015c31cadea4dfce9f34eb Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Tue, 14 Nov 2023 19:35:02 +0200 Subject: [PATCH 27/74] chainHead/tests: Fix clippy (#2325) Remove the genesis hash from tests: - Clippy was passing on the PR: https://github.com/paritytech/polkadot-sdk/pull/2296 - Clippy fails on master: https://gitlab.parity.io/parity/mirrors/polkadot-sdk/-/jobs/4328487 This was a race with merging: https://github.com/paritytech/polkadot-sdk/pull/2295, which introduced another test that used the `CHAIN_GENESIS` Signed-off-by: Alexandru Vasile --- substrate/client/rpc-spec-v2/src/chain_head/tests.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index 11c6798bf0a..4e6775fe280 100644 --- 
a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -1627,7 +1627,6 @@ async fn follow_with_multiple_unpin_hashes() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), -- GitLab From 54f84285bf863c75d9383f7d9d3e7612410f23a4 Mon Sep 17 00:00:00 2001 From: jserrat <35823283+Jpserrat@users.noreply.github.com> Date: Tue, 14 Nov 2023 14:50:18 -0300 Subject: [PATCH 28/74] change prepare worker to use fork instead of threads (#1685) Co-authored-by: Marcin S --- Cargo.lock | 26 + .../node/core/candidate-validation/src/lib.rs | 24 +- .../core/candidate-validation/src/tests.rs | 12 +- .../benches/host_prepare_rococo_runtime.rs | 2 +- polkadot/node/core/pvf/common/Cargo.toml | 2 +- polkadot/node/core/pvf/common/src/error.rs | 55 +- polkadot/node/core/pvf/common/src/execute.rs | 51 +- .../node/core/pvf/common/src/worker/mod.rs | 12 +- .../node/core/pvf/execute-worker/Cargo.toml | 3 + .../node/core/pvf/execute-worker/src/lib.rs | 399 ++++++++--- .../node/core/pvf/prepare-worker/Cargo.toml | 2 + .../node/core/pvf/prepare-worker/src/lib.rs | 634 ++++++++++++------ polkadot/node/core/pvf/src/error.rs | 53 +- polkadot/node/core/pvf/src/execute/queue.rs | 15 +- .../node/core/pvf/src/execute/worker_intf.rs | 49 +- polkadot/node/core/pvf/src/prepare/pool.rs | 21 +- .../node/core/pvf/src/prepare/worker_intf.rs | 24 +- polkadot/node/core/pvf/src/testing.rs | 34 +- polkadot/node/core/pvf/tests/it/main.rs | 135 +--- polkadot/node/core/pvf/tests/it/process.rs | 383 +++++++++++ .../node/core/pvf/tests/it/worker_common.rs | 8 +- .../src/node/utility/pvf-host-and-workers.md | 47 +- .../list-syscalls/execute-worker-syscalls | 6 + .../list-syscalls/prepare-worker-syscalls | 5 + 24 files changed, 1468 insertions(+), 534 deletions(-) create mode 100644 polkadot/node/core/pvf/tests/it/process.rs diff --git a/Cargo.lock b/Cargo.lock index c57a5ce1393..d8308086822 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8627,6 +8627,17 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "nix" +version = "0.27.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" +dependencies = [ + "bitflags 2.4.0", + "cfg-if", + "libc", +] + [[package]] name = "no-std-net" version = "0.6.0" @@ -9103,6 +9114,16 @@ dependencies = [ "num-traits", ] +[[package]] +name = "os_pipe" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ae859aa07428ca9a929b936690f8b12dc5f11dd8c6992a18ca93919f28bc177" +dependencies = [ + "libc", + "windows-sys 0.48.0", +] + [[package]] name = "os_str_bytes" version = "6.5.1" @@ -12525,6 +12546,9 @@ name = "polkadot-node-core-pvf-execute-worker" version = "1.0.0" dependencies = [ "cpu-time", + "libc", + "nix 0.27.1", + "os_pipe", "parity-scale-codec", "polkadot-node-core-pvf-common", "polkadot-parachain-primitives", @@ -12539,6 +12563,8 @@ dependencies = [ "cfg-if", "criterion 0.4.0", "libc", + "nix 0.27.1", + "os_pipe", "parity-scale-codec", "polkadot-node-core-pvf-common", "polkadot-primitives", diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index 4232e5f1cdd..a3d6f047313 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ 
b/polkadot/node/core/candidate-validation/src/lib.rs @@ -642,14 +642,19 @@ async fn validate_candidate_exhaustive( }, Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::HardTimeout)) => Ok(ValidationResult::Invalid(InvalidCandidate::Timeout)), - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::WorkerReportedError(e))) => + Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::WorkerReportedInvalid(e))) => Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(e))), Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath)) => Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError( "ambiguous worker death".to_string(), ))), - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::Panic(err))) => + Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::JobError(err))) => Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(err))), + + Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousJobDeath(err))) => + Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(format!( + "ambiguous job death: {err}" + )))), Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::PrepareError(e))) => { // In principle if preparation of the `WASM` fails, the current candidate can not be the // reason for that. So we can't say whether it is invalid or not. In addition, with @@ -741,9 +746,9 @@ trait ValidationBackend { }; // Allow limited retries for each kind of error. + let mut num_death_retries_left = 1; + let mut num_job_error_retries_left = 1; let mut num_internal_retries_left = 1; - let mut num_awd_retries_left = 1; - let mut num_panic_retries_left = 1; loop { // Stop retrying if we exceeded the timeout. if total_time_start.elapsed() + retry_delay > exec_timeout { @@ -752,11 +757,12 @@ trait ValidationBackend { match validation_result { Err(ValidationError::InvalidCandidate( - WasmInvalidCandidate::AmbiguousWorkerDeath, - )) if num_awd_retries_left > 0 => num_awd_retries_left -= 1, - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::Panic(_))) - if num_panic_retries_left > 0 => - num_panic_retries_left -= 1, + WasmInvalidCandidate::AmbiguousWorkerDeath | + WasmInvalidCandidate::AmbiguousJobDeath(_), + )) if num_death_retries_left > 0 => num_death_retries_left -= 1, + Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::JobError(_))) + if num_job_error_retries_left > 0 => + num_job_error_retries_left -= 1, Err(ValidationError::InternalError(_)) if num_internal_retries_left > 0 => num_internal_retries_left -= 1, _ => break, diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs index af530a20c4e..cab823e1e63 100644 --- a/polkadot/node/core/candidate-validation/src/tests.rs +++ b/polkadot/node/core/candidate-validation/src/tests.rs @@ -695,11 +695,13 @@ fn candidate_validation_retry_panic_errors() { let v = executor::block_on(validate_candidate_exhaustive( MockValidateCandidateBackend::with_hardcoded_result_list(vec![ - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::Panic("foo".into()))), - // Throw an AWD error, we should still retry again. - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath)), + Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::JobError("foo".into()))), + // Throw an AJD error, we should still retry again. + Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousJobDeath( + "baz".into(), + ))), // Throw another panic error. 
- Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::Panic("bar".into()))), + Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::JobError("bar".into()))), ]), validation_data, validation_code, @@ -1216,7 +1218,7 @@ fn precheck_properly_classifies_outcomes() { inner(Err(PrepareError::Prevalidation("foo".to_owned())), PreCheckOutcome::Invalid); inner(Err(PrepareError::Preparation("bar".to_owned())), PreCheckOutcome::Invalid); - inner(Err(PrepareError::Panic("baz".to_owned())), PreCheckOutcome::Invalid); + inner(Err(PrepareError::JobError("baz".to_owned())), PreCheckOutcome::Invalid); inner(Err(PrepareError::TimedOut), PreCheckOutcome::Failed); inner(Err(PrepareError::IoErr("fizz".to_owned())), PreCheckOutcome::Failed); diff --git a/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs b/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs index d0cefae6cdb..378374a10b3 100644 --- a/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs +++ b/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs @@ -37,7 +37,7 @@ impl TestHost { where F: FnOnce(&mut Config), { - let (prepare_worker_path, execute_worker_path) = testing::get_and_check_worker_paths(); + let (prepare_worker_path, execute_worker_path) = testing::build_workers_and_get_paths(true); let cache_dir = tempfile::tempdir().unwrap(); let mut config = Config::new( diff --git a/polkadot/node/core/pvf/common/Cargo.toml b/polkadot/node/core/pvf/common/Cargo.toml index 7dc8d307026..e3fda06963e 100644 --- a/polkadot/node/core/pvf/common/Cargo.toml +++ b/polkadot/node/core/pvf/common/Cargo.toml @@ -12,6 +12,7 @@ cpu-time = "1.0.0" futures = "0.3.21" gum = { package = "tracing-gum", path = "../../../gum" } libc = "0.2.139" +thiserror = "1.0.31" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } @@ -30,7 +31,6 @@ sp-tracing = { path = "../../../../../substrate/primitives/tracing" } [target.'cfg(target_os = "linux")'.dependencies] landlock = "0.3.0" seccompiler = "0.4.0" -thiserror = "1.0.31" [dev-dependencies] assert_matches = "1.4.0" diff --git a/polkadot/node/core/pvf/common/src/error.rs b/polkadot/node/core/pvf/common/src/error.rs index 82b56562d8c..34475c481f7 100644 --- a/polkadot/node/core/pvf/common/src/error.rs +++ b/polkadot/node/core/pvf/common/src/error.rs @@ -35,9 +35,9 @@ pub enum PrepareError { /// Instantiation of the WASM module instance failed. #[codec(index = 2)] RuntimeConstruction(String), - /// An unexpected panic has occurred in the preparation worker. + /// An unexpected error has occurred in the preparation job. #[codec(index = 3)] - Panic(String), + JobError(String), /// Failed to prepare the PVF due to the time limit. #[codec(index = 4)] TimedOut, @@ -48,12 +48,12 @@ pub enum PrepareError { /// The temporary file for the artifact could not be created at the given cache path. This /// state is reported by the validation host (not by the worker). #[codec(index = 6)] - CreateTmpFileErr(String), + CreateTmpFile(String), /// The response from the worker is received, but the file cannot be renamed (moved) to the /// final destination location. This state is reported by the validation host (not by the /// worker). #[codec(index = 7)] - RenameTmpFileErr { + RenameTmpFile { err: String, // Unfortunately `PathBuf` doesn't implement `Encode`/`Decode`, so we do a fallible // conversion to `Option`. @@ -68,11 +68,14 @@ pub enum PrepareError { /// reported by the validation host (not by the worker). 
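The renames in this hunk (for example `CreateTmpFileErr` to `CreateTmpFile`) leave the `#[codec(index = …)]` attributes untouched, which is what keeps the SCALE wire format stable while the Rust-level names change. A minimal standalone sketch of that property, using hypothetical enum and variant names rather than the real `PrepareError`, and assuming `parity-scale-codec` with the `derive` feature as the crates in this series already use:

```rust
use parity_scale_codec::{Decode, Encode};

// Hypothetical "old" enum: the variant name is about to change.
#[derive(Encode, Decode, PartialEq, Debug)]
enum ErrV1 {
    #[codec(index = 6)]
    CreateTmpFileErr(String),
}

// Hypothetical "new" enum: same pinned index, new name, so old encodings still decode.
#[derive(Encode, Decode, PartialEq, Debug)]
enum ErrV2 {
    #[codec(index = 6)]
    CreateTmpFile(String),
}

fn main() {
    let old = ErrV1::CreateTmpFileErr("disk full".into()).encode();
    // The first byte is the pinned variant index, not the declaration order.
    assert_eq!(old[0], 6);
    // Bytes produced by the old enum decode as the renamed variant.
    assert_eq!(
        ErrV2::decode(&mut &old[..]).unwrap(),
        ErrV2::CreateTmpFile("disk full".into())
    );
}
```

Pinning indices explicitly also means new variants can be appended later without renumbering the existing ones.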
#[codec(index = 9)] ClearWorkerDir(String), + /// The preparation job process died, due to OOM, a seccomp violation, or some other factor. + JobDied(String), + #[codec(index = 10)] + /// Some error occurred when interfacing with the kernel. + #[codec(index = 11)] + Kernel(String), } -/// Pre-encoded length-prefixed `PrepareResult::Err(PrepareError::OutOfMemory)` -pub const OOM_PAYLOAD: &[u8] = b"\x02\x00\x00\x00\x00\x00\x00\x00\x01\x08"; - impl PrepareError { /// Returns whether this is a deterministic error, i.e. one that should trigger reliably. Those /// errors depend on the PVF itself and the sc-executor/wasmtime logic. @@ -83,12 +86,15 @@ impl PrepareError { pub fn is_deterministic(&self) -> bool { use PrepareError::*; match self { - Prevalidation(_) | Preparation(_) | Panic(_) | OutOfMemory => true, - TimedOut | + Prevalidation(_) | Preparation(_) | JobError(_) | OutOfMemory => true, IoErr(_) | - CreateTmpFileErr(_) | - RenameTmpFileErr { .. } | - ClearWorkerDir(_) => false, + JobDied(_) | + CreateTmpFile(_) | + RenameTmpFile { .. } | + ClearWorkerDir(_) | + Kernel(_) => false, + // Can occur due to issues with the PVF, but also due to factors like local load. + TimedOut => false, // Can occur due to issues with the PVF, but also due to local errors. RuntimeConstruction(_) => false, } @@ -102,14 +108,16 @@ impl fmt::Display for PrepareError { Prevalidation(err) => write!(f, "prevalidation: {}", err), Preparation(err) => write!(f, "preparation: {}", err), RuntimeConstruction(err) => write!(f, "runtime construction: {}", err), - Panic(err) => write!(f, "panic: {}", err), + JobError(err) => write!(f, "panic: {}", err), TimedOut => write!(f, "prepare: timeout"), IoErr(err) => write!(f, "prepare: io error while receiving response: {}", err), - CreateTmpFileErr(err) => write!(f, "prepare: error creating tmp file: {}", err), - RenameTmpFileErr { err, src, dest } => + JobDied(err) => write!(f, "prepare: prepare job died: {}", err), + CreateTmpFile(err) => write!(f, "prepare: error creating tmp file: {}", err), + RenameTmpFile { err, src, dest } => write!(f, "prepare: error renaming tmp file ({:?} -> {:?}): {}", src, dest, err), OutOfMemory => write!(f, "prepare: out of memory"), ClearWorkerDir(err) => write!(f, "prepare: error clearing worker cache: {}", err), + Kernel(err) => write!(f, "prepare: error interfacing with the kernel: {}", err), } } } @@ -133,9 +141,9 @@ pub enum InternalValidationError { // conversion to `Option`. path: Option, }, - /// An error occurred in the CPU time monitor thread. Should be totally unrelated to - /// validation. - CpuTimeMonitorThread(String), + /// Some error occurred when interfacing with the kernel. + Kernel(String), + /// Some non-deterministic preparation error occurred. 
NonDeterministicPrepareError(PrepareError), } @@ -158,17 +166,8 @@ impl fmt::Display for InternalValidationError { "validation: host could not clear the worker cache ({:?}) after a job: {}", path, err ), - CpuTimeMonitorThread(err) => - write!(f, "validation: an error occurred in the CPU time monitor thread: {}", err), + Kernel(err) => write!(f, "validation: error interfacing with the kernel: {}", err), NonDeterministicPrepareError(err) => write!(f, "validation: prepare: {}", err), } } } - -#[test] -fn pre_encoded_payloads() { - let oom_enc = PrepareResult::Err(PrepareError::OutOfMemory).encode(); - let mut oom_payload = oom_enc.len().to_le_bytes().to_vec(); - oom_payload.extend(oom_enc); - assert_eq!(oom_payload, OOM_PAYLOAD); -} diff --git a/polkadot/node/core/pvf/common/src/execute.rs b/polkadot/node/core/pvf/common/src/execute.rs index b89ab089af1..89e7c8e471a 100644 --- a/polkadot/node/core/pvf/common/src/execute.rs +++ b/polkadot/node/core/pvf/common/src/execute.rs @@ -28,9 +28,9 @@ pub struct Handshake { pub executor_params: ExecutorParams, } -/// The response from an execution job on the worker. +/// The response from the execution worker. #[derive(Debug, Encode, Decode)] -pub enum Response { +pub enum WorkerResponse { /// The job completed successfully. Ok { /// The result of parachain validation. @@ -41,14 +41,38 @@ pub enum Response { /// The candidate is invalid. InvalidCandidate(String), /// The job timed out. - TimedOut, - /// An unexpected panic has occurred in the execution worker. - Panic(String), + JobTimedOut, + /// The job process has died. We must kill the worker just in case. + /// + /// We cannot treat this as an internal error because malicious code may have killed the job. + /// We still retry it, because in the non-malicious case it is likely spurious. + JobDied(String), + /// An unexpected error occurred in the job process, e.g. failing to spawn a thread, panic, + /// etc. + /// + /// Because malicious code can cause a job error, we must not treat it as an internal error. We + /// still retry it, because in the non-malicious case it is likely spurious. + JobError(String), + /// Some internal error occurred. InternalError(InternalValidationError), } -impl Response { +/// The result of a job on the execution worker. +pub type JobResult = Result; + +/// The successful response from a job on the execution worker. +#[derive(Debug, Encode, Decode)] +pub enum JobResponse { + Ok { + /// The result of parachain validation. + result_descriptor: ValidationResult, + }, + /// The candidate is invalid. + InvalidCandidate(String), +} + +impl JobResponse { /// Creates an invalid response from a context `ctx` and a message `msg` (which can be empty). pub fn format_invalid(ctx: &'static str, msg: &str) -> Self { if msg.is_empty() { @@ -58,3 +82,18 @@ impl Response { } } } + +/// An unexpected error occurred in the execution job process. Because this comes from the job, +/// which executes untrusted code, this error must likewise be treated as untrusted. That is, we +/// cannot raise an internal error based on this. 
+#[derive(thiserror::Error, Debug, Encode, Decode)] +pub enum JobError { + #[error("The job timed out")] + TimedOut, + #[error("An unexpected panic has occurred in the execution job: {0}")] + Panic(String), + #[error("Could not spawn the requested thread: {0}")] + CouldNotSpawnThread(String), + #[error("An error occurred in the CPU time monitor thread: {0}")] + CpuTimeMonitorThread(String), +} diff --git a/polkadot/node/core/pvf/common/src/worker/mod.rs b/polkadot/node/core/pvf/common/src/worker/mod.rs index f6a67b98321..86f47acccac 100644 --- a/polkadot/node/core/pvf/common/src/worker/mod.rs +++ b/polkadot/node/core/pvf/common/src/worker/mod.rs @@ -205,9 +205,15 @@ impl fmt::Display for WorkerKind { } } -// The worker version must be passed in so that we accurately get the version of the worker, and not -// the version that this crate was compiled with. -pub fn worker_event_loop( +// NOTE: The worker version must be passed in so that we accurately get the version of the worker, +// and not the version that this crate was compiled with. +// +// NOTE: This must not spawn any threads due to safety requirements in `event_loop` and to avoid +// errors in [`security::unshare_user_namespace_and_change_root`]. +// +/// Initializes the worker process, then runs the given event loop, which spawns a new job process +/// to securely handle each incoming request. +pub fn run_worker( worker_kind: WorkerKind, socket_path: PathBuf, #[cfg_attr(not(target_os = "linux"), allow(unused_mut))] mut worker_dir_path: PathBuf, diff --git a/polkadot/node/core/pvf/execute-worker/Cargo.toml b/polkadot/node/core/pvf/execute-worker/Cargo.toml index 77a9420961c..40e0ff4f0a1 100644 --- a/polkadot/node/core/pvf/execute-worker/Cargo.toml +++ b/polkadot/node/core/pvf/execute-worker/Cargo.toml @@ -9,6 +9,9 @@ license.workspace = true [dependencies] cpu-time = "1.0.0" gum = { package = "tracing-gum", path = "../../../gum" } +os_pipe = "1.1.4" +nix = { version = "0.27.1", features = ["resource", "process"]} +libc = "0.2.139" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } diff --git a/polkadot/node/core/pvf/execute-worker/src/lib.rs b/polkadot/node/core/pvf/execute-worker/src/lib.rs index 8872f9bc8dd..9ec811686b8 100644 --- a/polkadot/node/core/pvf/execute-worker/src/lib.rs +++ b/polkadot/node/core/pvf/execute-worker/src/lib.rs @@ -25,23 +25,33 @@ pub use polkadot_node_core_pvf_common::{ const LOG_TARGET: &str = "parachain::pvf-execute-worker"; use cpu_time::ProcessTime; +use nix::{ + errno::Errno, + sys::{ + resource::{Usage, UsageWho}, + wait::WaitStatus, + }, + unistd::{ForkResult, Pid}, +}; +use os_pipe::{self, PipeReader, PipeWriter}; use parity_scale_codec::{Decode, Encode}; use polkadot_node_core_pvf_common::{ error::InternalValidationError, - execute::{Handshake, Response}, + execute::{Handshake, JobError, JobResponse, JobResult, WorkerResponse}, framed_recv_blocking, framed_send_blocking, worker::{ - cpu_time_monitor_loop, stringify_panic_payload, + cpu_time_monitor_loop, run_worker, stringify_panic_payload, thread::{self, WaitOutcome}, - worker_event_loop, WorkerKind, + WorkerKind, }, }; use polkadot_parachain_primitives::primitives::ValidationResult; use polkadot_primitives::{executor_params::DEFAULT_NATIVE_STACK_MAX, ExecutorParams}; use std::{ - io, + io::{self, Read}, os::unix::net::UnixStream, path::PathBuf, + process, sync::{mpsc::channel, Arc}, time::Duration, }; @@ -105,7 +115,7 @@ fn recv_request(stream: &mut UnixStream) -> io::Result<(Vec, Duration)> { Ok((params, 
execution_timeout)) } -fn send_response(stream: &mut UnixStream, response: Response) -> io::Result<()> { +fn send_response(stream: &mut UnixStream, response: WorkerResponse) -> io::Result<()> { framed_send_blocking(stream, &response.encode()) } @@ -131,7 +141,7 @@ pub fn worker_entrypoint( worker_version: Option<&str>, security_status: SecurityStatus, ) { - worker_event_loop( + run_worker( WorkerKind::Execute, socket_path, worker_dir_path, @@ -139,7 +149,7 @@ pub fn worker_entrypoint( worker_version, &security_status, |mut stream, worker_dir_path| { - let worker_pid = std::process::id(); + let worker_pid = process::id(); let artifact_path = worker_dir::execute_artifact(&worker_dir_path); let Handshake { executor_params } = recv_handshake(&mut stream)?; @@ -157,7 +167,7 @@ pub fn worker_entrypoint( let compiled_artifact_blob = match std::fs::read(&artifact_path) { Ok(bytes) => bytes, Err(err) => { - let response = Response::InternalError( + let response = WorkerResponse::InternalError( InternalValidationError::CouldNotOpenFile(err.to_string()), ); send_response(&mut stream, response)?; @@ -165,82 +175,51 @@ pub fn worker_entrypoint( }, }; - // Conditional variable to notify us when a thread is done. - let condvar = thread::get_condvar(); + let (pipe_reader, pipe_writer) = os_pipe::pipe()?; - let cpu_time_start = ProcessTime::now(); + let usage_before = match nix::sys::resource::getrusage(UsageWho::RUSAGE_CHILDREN) { + Ok(usage) => usage, + Err(errno) => { + let response = internal_error_from_errno("getrusage before", errno); + send_response(&mut stream, response)?; + continue + }, + }; + + // SAFETY: new process is spawned within a single threaded process. This invariant + // is enforced by tests. + let response = match unsafe { nix::unistd::fork() } { + Err(errno) => internal_error_from_errno("fork", errno), + Ok(ForkResult::Child) => { + // Dropping the stream closes the underlying socket. We want to make sure + // that the sandboxed child can't get any kind of information from the + // outside world. The only IPC it should be able to do is sending its + // response over the pipe. + drop(stream); + // Drop the read end so we don't have too many FDs open. + drop(pipe_reader); - // Spawn a new thread that runs the CPU time monitor. - let (cpu_time_monitor_tx, cpu_time_monitor_rx) = channel::<()>(); - let cpu_time_monitor_thread = thread::spawn_worker_thread( - "cpu time monitor thread", - move || { - cpu_time_monitor_loop( - cpu_time_start, + handle_child_process( + pipe_writer, + compiled_artifact_blob, + executor_params, + params, execution_timeout, - cpu_time_monitor_rx, - ) - }, - Arc::clone(&condvar), - WaitOutcome::TimedOut, - )?; - - let executor_params_2 = executor_params.clone(); - let execute_thread = thread::spawn_worker_thread_with_stack_size( - "execute thread", - move || { - validate_using_artifact( - &compiled_artifact_blob, - &executor_params_2, - ¶ms, - cpu_time_start, ) }, - Arc::clone(&condvar), - WaitOutcome::Finished, - EXECUTE_THREAD_STACK_SIZE, - )?; - - let outcome = thread::wait_for_threads(condvar); - - let response = match outcome { - WaitOutcome::Finished => { - let _ = cpu_time_monitor_tx.send(()); - execute_thread - .join() - .unwrap_or_else(|e| Response::Panic(stringify_panic_payload(e))) - }, - // If the CPU thread is not selected, we signal it to end, the join handle is - // dropped and the thread will finish in the background. 
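The overall shape of the new flow, fork a single-threaded job process, stream one SCALE-encoded result back through a pipe, then reap the child with `waitpid`, can be exercised outside the worker. The sketch below is illustrative only: `JobOutcome` is a made-up type, the hand-rolled length prefix stands in for the host's `framed_send_blocking`/`framed_recv_blocking` helpers, and it assumes the same `nix` (with the `process` feature) and `os_pipe` crates this patch adds.

```rust
use nix::{
    sys::wait::{waitpid, WaitStatus},
    unistd::{fork, ForkResult},
};
use parity_scale_codec::{Decode, Encode};
use std::io::{Read, Write};

// Made-up stand-in for the real job result type.
#[derive(Encode, Decode, Debug)]
enum JobOutcome {
    Ok(u64),
    Invalid(String),
}

fn main() -> std::io::Result<()> {
    let (mut reader, mut writer) = os_pipe::pipe()?;

    // SAFETY: sound only because no threads have been spawned before the fork,
    // which mirrors the invariant the workers rely on.
    match unsafe { fork() }.expect("fork failed") {
        ForkResult::Child => {
            // Child: keep only the write end, send one length-prefixed message, exit.
            drop(reader);
            let encoded = JobOutcome::Ok(42).encode();
            let mut framed = (encoded.len() as u64).to_le_bytes().to_vec();
            framed.extend(encoded);
            writer.write_all(&framed).unwrap();
            drop(writer);
            std::process::exit(0);
        },
        ForkResult::Parent { child } => {
            // Parent: drop the write end so reads hit EOF once the child exits.
            drop(writer);
            let mut buf = Vec::new();
            reader.read_to_end(&mut buf)?;

            // Reap the child and check how it terminated before trusting the bytes.
            let status = waitpid(child, None).expect("waitpid failed");
            assert!(matches!(status, WaitStatus::Exited(_, 0)));

            let (len_bytes, rest) = buf.split_at(8);
            let len = u64::from_le_bytes(len_bytes.try_into().unwrap()) as usize;
            let outcome = JobOutcome::decode(&mut &rest[..len]).expect("decode failed");
            println!("job outcome: {outcome:?}");
        },
    }
    Ok(())
}
```

Dropping the parent's copy of the write end before reading is what lets `read_to_end` terminate: the pipe only reports EOF once every writer, including the child's, has been closed, which is the same reason the worker drops `pipe_writer` right after the fork.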
- WaitOutcome::TimedOut => { - match cpu_time_monitor_thread.join() { - Ok(Some(cpu_time_elapsed)) => { - // Log if we exceed the timeout and the other thread hasn't - // finished. - gum::warn!( - target: LOG_TARGET, - %worker_pid, - "execute job took {}ms cpu time, exceeded execute timeout {}ms", - cpu_time_elapsed.as_millis(), - execution_timeout.as_millis(), - ); - Response::TimedOut - }, - Ok(None) => Response::InternalError( - InternalValidationError::CpuTimeMonitorThread( - "error communicating over finished channel".into(), - ), - ), - Err(e) => Response::InternalError( - InternalValidationError::CpuTimeMonitorThread( - stringify_panic_payload(e), - ), - ), - } + Ok(ForkResult::Parent { child }) => { + // the read end will wait until all write ends have been closed, + // this drop is necessary to avoid deadlock + drop(pipe_writer); + + handle_parent_process( + pipe_reader, + child, + worker_pid, + usage_before, + execution_timeout, + )? }, - WaitOutcome::Pending => unreachable!( - "we run wait_while until the outcome is no longer pending; qed" - ), }; gum::trace!( @@ -259,27 +238,275 @@ fn validate_using_artifact( compiled_artifact_blob: &[u8], executor_params: &ExecutorParams, params: &[u8], - cpu_time_start: ProcessTime, -) -> Response { +) -> JobResponse { let descriptor_bytes = match unsafe { // SAFETY: this should be safe since the compiled artifact passed here comes from the // file created by the prepare workers. These files are obtained by calling // [`executor_intf::prepare`]. execute_artifact(compiled_artifact_blob, executor_params, params) } { - Err(err) => return Response::format_invalid("execute", &err), + Err(err) => return JobResponse::format_invalid("execute", &err), Ok(d) => d, }; let result_descriptor = match ValidationResult::decode(&mut &descriptor_bytes[..]) { Err(err) => - return Response::format_invalid("validation result decoding failed", &err.to_string()), + return JobResponse::format_invalid( + "validation result decoding failed", + &err.to_string(), + ), Ok(r) => r, }; - // Include the decoding in the measured time, to prevent any potential attacks exploiting some - // bug in decoding. - let duration = cpu_time_start.elapsed(); + JobResponse::Ok { result_descriptor } +} + +/// This is used to handle child process during pvf execute worker. +/// It execute the artifact and pipes back the response to the parent process +/// +/// # Arguments +/// +/// - `pipe_write`: A `PipeWriter` structure, the writing end of a pipe. +/// +/// - `compiled_artifact_blob`: The artifact bytes from compiled by the prepare worker`. +/// +/// - `executor_params`: Deterministically serialized execution environment semantics. +/// +/// - `params`: Validation parameters. +/// +/// - `execution_timeout`: The timeout in `Duration`. +/// +/// # Returns +/// +/// - pipe back `JobResponse` to the parent process. +fn handle_child_process( + mut pipe_write: PipeWriter, + compiled_artifact_blob: Vec, + executor_params: ExecutorParams, + params: Vec, + execution_timeout: Duration, +) -> ! { + gum::debug!( + target: LOG_TARGET, + worker_job_pid = %process::id(), + "worker job: executing artifact", + ); + + // Conditional variable to notify us when a thread is done. + let condvar = thread::get_condvar(); + let cpu_time_start = ProcessTime::now(); + + // Spawn a new thread that runs the CPU time monitor. 
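As an aside before the monitor thread is spawned below: the condition-variable pattern used by `handle_child_process` (one shared flag, several worker threads, whichever finishes first decides the outcome) is plain `std::sync` machinery. A self-contained sketch of the idea, with invented outcomes and delays, might look like this:

```rust
use std::sync::{Arc, Condvar, Mutex};
use std::thread;
use std::time::Duration;

#[derive(Clone, Copy, PartialEq, Debug)]
enum Outcome {
    Pending,
    Finished,
    TimedOut,
}

fn main() {
    // Shared state plus condvar: whichever thread finishes first records its outcome.
    let pair = Arc::new((Mutex::new(Outcome::Pending), Condvar::new()));

    for (outcome, delay_ms) in [(Outcome::Finished, 50_u64), (Outcome::TimedOut, 200)] {
        let pair = Arc::clone(&pair);
        thread::spawn(move || {
            thread::sleep(Duration::from_millis(delay_ms));
            let (lock, cvar) = &*pair;
            let mut state = lock.lock().unwrap();
            // Only the first thread to finish gets to set the outcome.
            if *state == Outcome::Pending {
                *state = outcome;
            }
            cvar.notify_all();
        });
    }

    // Rough analogue of the workers' `wait_for_threads`: block while still pending.
    let (lock, cvar) = &*pair;
    let winner = *cvar
        .wait_while(lock.lock().unwrap(), |state| *state == Outcome::Pending)
        .unwrap();
    // With these delays the fast thread should win, but either way the state
    // is guaranteed to have left `Pending`.
    assert_ne!(winner, Outcome::Pending);
    println!("first outcome: {winner:?}");
}
```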
+ let (cpu_time_monitor_tx, cpu_time_monitor_rx) = channel::<()>(); + let cpu_time_monitor_thread = thread::spawn_worker_thread( + "cpu time monitor thread", + move || cpu_time_monitor_loop(cpu_time_start, execution_timeout, cpu_time_monitor_rx), + Arc::clone(&condvar), + WaitOutcome::TimedOut, + ) + .unwrap_or_else(|err| { + send_child_response(&mut pipe_write, Err(JobError::CouldNotSpawnThread(err.to_string()))) + }); + + let executor_params_2 = executor_params.clone(); + let execute_thread = thread::spawn_worker_thread_with_stack_size( + "execute thread", + move || validate_using_artifact(&compiled_artifact_blob, &executor_params_2, ¶ms), + Arc::clone(&condvar), + WaitOutcome::Finished, + EXECUTE_THREAD_STACK_SIZE, + ) + .unwrap_or_else(|err| { + send_child_response(&mut pipe_write, Err(JobError::CouldNotSpawnThread(err.to_string()))) + }); + + let outcome = thread::wait_for_threads(condvar); + + let response = match outcome { + WaitOutcome::Finished => { + let _ = cpu_time_monitor_tx.send(()); + execute_thread.join().map_err(|e| JobError::Panic(stringify_panic_payload(e))) + }, + // If the CPU thread is not selected, we signal it to end, the join handle is + // dropped and the thread will finish in the background. + WaitOutcome::TimedOut => match cpu_time_monitor_thread.join() { + Ok(Some(_cpu_time_elapsed)) => Err(JobError::TimedOut), + Ok(None) => Err(JobError::CpuTimeMonitorThread( + "error communicating over finished channel".into(), + )), + Err(e) => Err(JobError::CpuTimeMonitorThread(stringify_panic_payload(e))), + }, + WaitOutcome::Pending => + unreachable!("we run wait_while until the outcome is no longer pending; qed"), + }; + + send_child_response(&mut pipe_write, response); +} + +/// Waits for child process to finish and handle child response from pipe. +/// +/// # Arguments +/// +/// - `pipe_read`: A `PipeReader` used to read data from the child process. +/// +/// - `child`: The child pid. +/// +/// - `usage_before`: Resource usage statistics before executing the child process. +/// +/// - `timeout`: The maximum allowed time for the child process to finish, in `Duration`. +/// +/// # Returns +/// +/// - The response, either `Ok` or some error state. +fn handle_parent_process( + mut pipe_read: PipeReader, + child: Pid, + worker_pid: u32, + usage_before: Usage, + timeout: Duration, +) -> io::Result { + // Read from the child. Don't decode unless the process exited normally, which we check later. + let mut received_data = Vec::new(); + pipe_read + .read_to_end(&mut received_data) + // Could not decode job response. There is either a bug or the job was hijacked. + // Should retry at any rate. + .map_err(|err| io::Error::new(io::ErrorKind::Other, err.to_string()))?; + + let status = nix::sys::wait::waitpid(child, None); + gum::trace!( + target: LOG_TARGET, + %worker_pid, + "execute worker received wait status from job: {:?}", + status, + ); + + let usage_after = match nix::sys::resource::getrusage(UsageWho::RUSAGE_CHILDREN) { + Ok(usage) => usage, + Err(errno) => return Ok(internal_error_from_errno("getrusage after", errno)), + }; + + // Using `getrusage` is needed to check whether child has timedout since we cannot rely on + // child to report its own time. 
+ // As `getrusage` returns resource usage from all terminated child processes, + // it is necessary to subtract the usage before the current child process to isolate its cpu + // time + let cpu_tv = get_total_cpu_usage(usage_after) - get_total_cpu_usage(usage_before); + if cpu_tv >= timeout { + gum::warn!( + target: LOG_TARGET, + %worker_pid, + "execute job took {}ms cpu time, exceeded execute timeout {}ms", + cpu_tv.as_millis(), + timeout.as_millis(), + ); + return Ok(WorkerResponse::JobTimedOut) + } + + match status { + Ok(WaitStatus::Exited(_, exit_status)) => { + let mut reader = io::BufReader::new(received_data.as_slice()); + let result = match recv_child_response(&mut reader) { + Ok(result) => result, + Err(err) => return Ok(WorkerResponse::JobError(err.to_string())), + }; + + match result { + Ok(JobResponse::Ok { result_descriptor }) => { + // The exit status should have been zero if no error occurred. + if exit_status != 0 { + return Ok(WorkerResponse::JobError(format!( + "unexpected exit status: {}", + exit_status + ))) + } + + Ok(WorkerResponse::Ok { result_descriptor, duration: cpu_tv }) + }, + Ok(JobResponse::InvalidCandidate(err)) => Ok(WorkerResponse::InvalidCandidate(err)), + Err(job_error) => { + gum::warn!( + target: LOG_TARGET, + %worker_pid, + "execute job error: {}", + job_error, + ); + if matches!(job_error, JobError::TimedOut) { + Ok(WorkerResponse::JobTimedOut) + } else { + Ok(WorkerResponse::JobError(job_error.to_string())) + } + }, + } + }, + // The job was killed by the given signal. + // + // The job gets SIGSYS on seccomp violations, but this signal may have been sent for some + // other reason, so we still need to check for seccomp violations elsewhere. + Ok(WaitStatus::Signaled(_pid, signal, _core_dump)) => + Ok(WorkerResponse::JobDied(format!("received signal: {signal:?}"))), + Err(errno) => Ok(internal_error_from_errno("waitpid", errno)), + + // It is within an attacker's power to send an unexpected exit status. So we cannot treat + // this as an internal error (which would make us abstain), but must vote against. + Ok(unexpected_wait_status) => Ok(WorkerResponse::JobDied(format!( + "unexpected status from wait: {unexpected_wait_status:?}" + ))), + } +} + +/// Calculate the total CPU time from the given `usage` structure, returned from +/// [`nix::sys::resource::getrusage`], and calculates the total CPU time spent, including both user +/// and system time. +/// +/// # Arguments +/// +/// - `rusage`: Contains resource usage information. +/// +/// # Returns +/// +/// Returns a `Duration` representing the total CPU time. +fn get_total_cpu_usage(rusage: Usage) -> Duration { + let micros = (((rusage.user_time().tv_sec() + rusage.system_time().tv_sec()) * 1_000_000) + + (rusage.system_time().tv_usec() + rusage.user_time().tv_usec()) as i64) as u64; + + return Duration::from_micros(micros) +} + +/// Get a job response. +fn recv_child_response(received_data: &mut io::BufReader<&[u8]>) -> io::Result { + let response_bytes = framed_recv_blocking(received_data)?; + JobResult::decode(&mut response_bytes.as_slice()).map_err(|e| { + io::Error::new( + io::ErrorKind::Other, + format!("execute pvf recv_child_response: decode error: {:?}", e), + ) + }) +} + +/// Write response to the pipe and exit process after. +/// +/// # Arguments +/// +/// - `pipe_write`: A `PipeWriter` structure, the writing end of a pipe. +/// +/// - `response`: Child process response, or error. +fn send_child_response(pipe_write: &mut PipeWriter, response: JobResult) -> ! 
{ + framed_send_blocking(pipe_write, response.encode().as_slice()) + .unwrap_or_else(|_| process::exit(libc::EXIT_FAILURE)); + + if response.is_ok() { + process::exit(libc::EXIT_SUCCESS) + } else { + process::exit(libc::EXIT_FAILURE) + } +} - Response::Ok { result_descriptor, duration } +fn internal_error_from_errno(context: &'static str, errno: Errno) -> WorkerResponse { + WorkerResponse::InternalError(InternalValidationError::Kernel(format!( + "{}: {}: {}", + context, + errno, + io::Error::last_os_error() + ))) } diff --git a/polkadot/node/core/pvf/prepare-worker/Cargo.toml b/polkadot/node/core/pvf/prepare-worker/Cargo.toml index e21583ecc8b..1cd221533f4 100644 --- a/polkadot/node/core/pvf/prepare-worker/Cargo.toml +++ b/polkadot/node/core/pvf/prepare-worker/Cargo.toml @@ -14,6 +14,8 @@ rayon = "1.5.1" tracking-allocator = { package = "staging-tracking-allocator", path = "../../../tracking-allocator" } tikv-jemalloc-ctl = { version = "0.5.0", optional = true } tikv-jemallocator = { version = "0.5.0", optional = true } +os_pipe = "1.1.4" +nix = { version = "0.27.1", features = ["resource", "process"]} parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } diff --git a/polkadot/node/core/pvf/prepare-worker/src/lib.rs b/polkadot/node/core/pvf/prepare-worker/src/lib.rs index 37a4dd06075..151b54efc2d 100644 --- a/polkadot/node/core/pvf/prepare-worker/src/lib.rs +++ b/polkadot/node/core/pvf/prepare-worker/src/lib.rs @@ -28,28 +28,40 @@ const LOG_TARGET: &str = "parachain::pvf-prepare-worker"; use crate::memory_stats::max_rss_stat::{extract_max_rss_stat, get_max_rss_thread}; #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] use crate::memory_stats::memory_tracker::{get_memory_tracker_loop_stats, memory_tracker_loop}; +use libc; +use nix::{ + errno::Errno, + sys::{ + resource::{Usage, UsageWho}, + wait::WaitStatus, + }, + unistd::{ForkResult, Pid}, +}; +use os_pipe::{self, PipeReader, PipeWriter}; use parity_scale_codec::{Decode, Encode}; use polkadot_node_core_pvf_common::{ - error::{PrepareError, PrepareResult, OOM_PAYLOAD}, + error::{PrepareError, PrepareResult}, executor_intf::create_runtime_from_artifact_bytes, framed_recv_blocking, framed_send_blocking, prepare::{MemoryStats, PrepareJobKind, PrepareStats}, pvf::PvfPrepData, worker::{ - cpu_time_monitor_loop, stringify_panic_payload, - thread::{self, WaitOutcome}, - worker_event_loop, WorkerKind, + cpu_time_monitor_loop, run_worker, stringify_panic_payload, + thread::{self, spawn_worker_thread, WaitOutcome}, + WorkerKind, }, worker_dir, ProcessTime, SecurityStatus, }; use polkadot_primitives::ExecutorParams; use std::{ - fs, io, + fs, + io::{self, Read}, os::{ fd::{AsRawFd, RawFd}, unix::net::UnixStream, }, path::PathBuf, + process, sync::{mpsc::channel, Arc}, time::Duration, }; @@ -65,6 +77,7 @@ static ALLOC: TrackingAllocator = static ALLOC: TrackingAllocator = TrackingAllocator(std::alloc::System); /// Contains the bytes for a successfully compiled artifact. +#[derive(Encode, Decode)] pub struct CompiledArtifact(Vec); impl CompiledArtifact { @@ -80,6 +93,7 @@ impl AsRef<[u8]> for CompiledArtifact { } } +/// Get a worker request. fn recv_request(stream: &mut UnixStream) -> io::Result { let pvf = framed_recv_blocking(stream)?; let pvf = PvfPrepData::decode(&mut &pvf[..]).map_err(|e| { @@ -91,6 +105,7 @@ fn recv_request(stream: &mut UnixStream) -> io::Result { Ok(pvf) } +/// Send a worker response. 
fn send_response(stream: &mut UnixStream, result: PrepareResult) -> io::Result<()> { framed_send_blocking(stream, &result.encode()) } @@ -111,18 +126,22 @@ fn start_memory_tracking(fd: RawFd, limit: Option) { // Syscalls never allocate or deallocate, so this is safe. libc::syscall(libc::SYS_write, fd, OOM_PAYLOAD.as_ptr(), OOM_PAYLOAD.len()); libc::syscall(libc::SYS_close, fd); - libc::syscall(libc::SYS_exit, 1); + // Make sure we exit from all threads. Copied from glibc. + libc::syscall(libc::SYS_exit_group, 1); + loop { + libc::syscall(libc::SYS_exit, 1); + } } #[cfg(not(target_os = "linux"))] { // Syscalls are not available on MacOS, so we have to use `libc` wrappers. - // Technicaly, there may be allocations inside, although they shouldn't be + // Technically, there may be allocations inside, although they shouldn't be // there. In that case, we'll see deadlocks on MacOS after the OOM condition // triggered. As we consider running a validator on MacOS unsafe, and this // code is only run by a validator, it's a lesser evil. libc::write(fd, OOM_PAYLOAD.as_ptr().cast(), OOM_PAYLOAD.len()); libc::close(fd); - std::process::exit(1); + libc::_exit(1); } })), ); @@ -155,17 +174,19 @@ fn end_memory_tracking() -> isize { /// /// 1. Get the code and parameters for preparation from the host. /// -/// 2. Start a memory tracker in a separate thread. +/// 2. Start a new child process /// -/// 3. Start the CPU time monitor loop and the actual preparation in two separate threads. +/// 3. Start the memory tracker and the actual preparation in two separate threads. /// /// 4. Wait on the two threads created in step 3. /// /// 5. Stop the memory tracker and get the stats. /// -/// 6. If compilation succeeded, write the compiled artifact into a temporary file. +/// 6. Pipe the result back to the parent process and exit from child process. /// -/// 7. Send the result of preparation back to the host. If any error occurred in the above steps, we +/// 7. If compilation succeeded, write the compiled artifact into a temporary file. +/// +/// 8. Send the result of preparation back to the host. If any error occurred in the above steps, we /// send that in the `PrepareResult`. pub fn worker_entrypoint( socket_path: PathBuf, @@ -174,7 +195,7 @@ pub fn worker_entrypoint( worker_version: Option<&str>, security_status: SecurityStatus, ) { - worker_event_loop( + run_worker( WorkerKind::Prepare, socket_path, worker_dir_path, @@ -182,7 +203,7 @@ pub fn worker_entrypoint( worker_version, &security_status, |mut stream, worker_dir_path| { - let worker_pid = std::process::id(); + let worker_pid = process::id(); let temp_artifact_dest = worker_dir::prepare_tmp_artifact(&worker_dir_path); loop { @@ -197,186 +218,58 @@ pub fn worker_entrypoint( let prepare_job_kind = pvf.prep_kind(); let executor_params = pvf.executor_params(); - // Conditional variable to notify us when a thread is done. - let condvar = thread::get_condvar(); - - // Run the memory tracker in a regular, non-worker thread. - #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] - let condvar_memory = Arc::clone(&condvar); - #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] - let memory_tracker_thread = std::thread::spawn(|| memory_tracker_loop(condvar_memory)); + let (pipe_reader, pipe_writer) = os_pipe::pipe()?; - let cpu_time_start = ProcessTime::now(); - - // Spawn a new thread that runs the CPU time monitor. 
- let (cpu_time_monitor_tx, cpu_time_monitor_rx) = channel::<()>(); - let cpu_time_monitor_thread = thread::spawn_worker_thread( - "cpu time monitor thread", - move || { - cpu_time_monitor_loop( - cpu_time_start, - preparation_timeout, - cpu_time_monitor_rx, - ) + let usage_before = match nix::sys::resource::getrusage(UsageWho::RUSAGE_CHILDREN) { + Ok(usage) => usage, + Err(errno) => { + let result = Err(error_from_errno("getrusage before", errno)); + send_response(&mut stream, result)?; + continue }, - Arc::clone(&condvar), - WaitOutcome::TimedOut, - )?; - - start_memory_tracking( - stream.as_raw_fd(), - executor_params.prechecking_max_memory().map(|v| { - v.try_into().unwrap_or_else(|_| { - gum::warn!( - LOG_TARGET, - %worker_pid, - "Illegal pre-checking max memory value {} discarded", - v, - ); - 0 - }) - }), - ); - - // Spawn another thread for preparation. - let prepare_thread = thread::spawn_worker_thread( - "prepare thread", - move || { - #[allow(unused_mut)] - let mut result = prepare_artifact(pvf, cpu_time_start); - - // Get the `ru_maxrss` stat. If supported, call getrusage for the thread. - #[cfg(target_os = "linux")] - let mut result = result - .map(|(artifact, elapsed)| (artifact, elapsed, get_max_rss_thread())); - - // If we are pre-checking, check for runtime construction errors. - // - // As pre-checking is more strict than just preparation in terms of memory - // and time, it is okay to do extra checks here. This takes negligible time - // anyway. - if let PrepareJobKind::Prechecking = prepare_job_kind { - result = result.and_then(|output| { - runtime_construction_check( - output.0.as_ref(), - executor_params.as_ref(), - )?; - Ok(output) - }); - } - - result - }, - Arc::clone(&condvar), - WaitOutcome::Finished, - )?; - - let outcome = thread::wait_for_threads(condvar); - - let peak_alloc = { - let peak = end_memory_tracking(); - gum::debug!( - target: LOG_TARGET, - %worker_pid, - "prepare job peak allocation is {} bytes", - peak, - ); - peak }; - let result = match outcome { - WaitOutcome::Finished => { - let _ = cpu_time_monitor_tx.send(()); - - match prepare_thread.join().unwrap_or_else(|err| { - Err(PrepareError::Panic(stringify_panic_payload(err))) - }) { - Err(err) => { - // Serialized error will be written into the socket. - Err(err) - }, - Ok(ok) => { - cfg_if::cfg_if! { - if #[cfg(target_os = "linux")] { - let (artifact, cpu_time_elapsed, max_rss) = ok; - } else { - let (artifact, cpu_time_elapsed) = ok; - } - } - - // Stop the memory stats worker and get its observed memory stats. - #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] - let memory_tracker_stats = get_memory_tracker_loop_stats(memory_tracker_thread, worker_pid); - let memory_stats = MemoryStats { - #[cfg(any( - target_os = "linux", - feature = "jemalloc-allocator" - ))] - memory_tracker_stats, - #[cfg(target_os = "linux")] - max_rss: extract_max_rss_stat(max_rss, worker_pid), - // Negative peak allocation values are legit; they are narrow - // corner cases and shouldn't affect overall statistics - // significantly - peak_tracked_alloc: if peak_alloc > 0 { - peak_alloc as u64 - } else { - 0u64 - }, - }; - - // Write the serialized artifact into a temp file. - // - // PVF host only keeps artifacts statuses in its memory, - // successfully compiled code gets stored on the disk (and - // consequently deserialized by execute-workers). The prepare worker - // is only required to send `Ok` to the pool to indicate the - // success. 
- - gum::debug!( - target: LOG_TARGET, - %worker_pid, - "worker: writing artifact to {}", - temp_artifact_dest.display(), - ); - fs::write(&temp_artifact_dest, &artifact)?; - - Ok(PrepareStats { cpu_time_elapsed, memory_stats }) - }, - } + // SAFETY: new process is spawned within a single threaded process. This invariant + // is enforced by tests. + let result = match unsafe { nix::unistd::fork() } { + Err(errno) => Err(error_from_errno("fork", errno)), + Ok(ForkResult::Child) => { + // Dropping the stream closes the underlying socket. We want to make sure + // that the sandboxed child can't get any kind of information from the + // outside world. The only IPC it should be able to do is sending its + // response over the pipe. + drop(stream); + // Drop the read end so we don't have too many FDs open. + drop(pipe_reader); + + handle_child_process( + pvf, + pipe_writer, + preparation_timeout, + prepare_job_kind, + executor_params, + ) }, - // If the CPU thread is not selected, we signal it to end, the join handle is - // dropped and the thread will finish in the background. - WaitOutcome::TimedOut => { - match cpu_time_monitor_thread.join() { - Ok(Some(cpu_time_elapsed)) => { - // Log if we exceed the timeout and the other thread hasn't - // finished. - gum::warn!( - target: LOG_TARGET, - %worker_pid, - "prepare job took {}ms cpu time, exceeded prepare timeout {}ms", - cpu_time_elapsed.as_millis(), - preparation_timeout.as_millis(), - ); - Err(PrepareError::TimedOut) - }, - Ok(None) => Err(PrepareError::IoErr( - "error communicating over closed channel".into(), - )), - // Errors in this thread are independent of the PVF. - Err(err) => Err(PrepareError::IoErr(stringify_panic_payload(err))), - } + Ok(ForkResult::Parent { child }) => { + // the read end will wait until all write ends have been closed, + // this drop is necessary to avoid deadlock + drop(pipe_writer); + + handle_parent_process( + pipe_reader, + child, + temp_artifact_dest.clone(), + worker_pid, + usage_before, + preparation_timeout, + ) }, - WaitOutcome::Pending => unreachable!( - "we run wait_while until the outcome is no longer pending; qed" - ), }; gum::trace!( target: LOG_TARGET, %worker_pid, - "worker: sending response to host: {:?}", + "worker: sending result to host: {:?}", result ); send_response(&mut stream, result)?; @@ -385,10 +278,7 @@ pub fn worker_entrypoint( ); } -fn prepare_artifact( - pvf: PvfPrepData, - cpu_time_start: ProcessTime, -) -> Result<(CompiledArtifact, Duration), PrepareError> { +fn prepare_artifact(pvf: PvfPrepData) -> Result { let blob = match prevalidate(&pvf.code()) { Err(err) => return Err(PrepareError::Prevalidation(format!("{:?}", err))), Ok(b) => b, @@ -398,7 +288,6 @@ fn prepare_artifact( Ok(compiled_artifact) => Ok(CompiledArtifact::new(compiled_artifact)), Err(err) => Err(PrepareError::Preparation(format!("{:?}", err))), } - .map(|artifact| (artifact, cpu_time_start.elapsed())) } /// Try constructing the runtime to catch any instantiation errors during pre-checking. @@ -412,3 +301,372 @@ fn runtime_construction_check( .map(|_runtime| ()) .map_err(|err| PrepareError::RuntimeConstruction(format!("{:?}", err))) } + +#[derive(Encode, Decode)] +struct JobResponse { + artifact: CompiledArtifact, + memory_stats: MemoryStats, +} + +/// This is used to handle child process during pvf prepare worker. 
+/// It prepares the artifact and tracks memory stats during preparation +/// and pipes back the response to the parent process +/// +/// # Arguments +/// +/// - `pvf`: `PvfPrepData` structure, containing data to prepare the artifact +/// +/// - `pipe_write`: A `PipeWriter` structure, the writing end of a pipe. +/// +/// - `preparation_timeout`: The timeout in `Duration`. +/// +/// - `prepare_job_kind`: The kind of prepare job. +/// +/// - `executor_params`: Deterministically serialized execution environment semantics. +/// +/// # Returns +/// +/// - If any error occur, pipe response back with `PrepareError`. +/// +/// - If success, pipe back `JobResponse`. +fn handle_child_process( + pvf: PvfPrepData, + mut pipe_write: PipeWriter, + preparation_timeout: Duration, + prepare_job_kind: PrepareJobKind, + executor_params: Arc, +) -> ! { + let worker_job_pid = process::id(); + gum::debug!( + target: LOG_TARGET, + %worker_job_pid, + ?prepare_job_kind, + ?preparation_timeout, + "worker job: preparing artifact", + ); + + // Conditional variable to notify us when a thread is done. + let condvar = thread::get_condvar(); + + // Run the memory tracker in a regular, non-worker thread. + #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] + let condvar_memory = Arc::clone(&condvar); + #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] + let memory_tracker_thread = std::thread::spawn(|| memory_tracker_loop(condvar_memory)); + + start_memory_tracking( + pipe_write.as_raw_fd(), + executor_params.prechecking_max_memory().map(|v| { + v.try_into().unwrap_or_else(|_| { + gum::warn!( + LOG_TARGET, + %worker_job_pid, + "Illegal pre-checking max memory value {} discarded", + v, + ); + 0 + }) + }), + ); + + let cpu_time_start = ProcessTime::now(); + + // Spawn a new thread that runs the CPU time monitor. + let (cpu_time_monitor_tx, cpu_time_monitor_rx) = channel::<()>(); + let cpu_time_monitor_thread = thread::spawn_worker_thread( + "cpu time monitor thread", + move || cpu_time_monitor_loop(cpu_time_start, preparation_timeout, cpu_time_monitor_rx), + Arc::clone(&condvar), + WaitOutcome::TimedOut, + ) + .unwrap_or_else(|err| { + send_child_response(&mut pipe_write, Err(PrepareError::IoErr(err.to_string()))) + }); + + let prepare_thread = spawn_worker_thread( + "prepare worker", + move || { + #[allow(unused_mut)] + let mut result = prepare_artifact(pvf); + + // Get the `ru_maxrss` stat. If supported, call getrusage for the thread. + #[cfg(target_os = "linux")] + let mut result = result.map(|artifact| (artifact, get_max_rss_thread())); + + // If we are pre-checking, check for runtime construction errors. + // + // As pre-checking is more strict than just preparation in terms of memory + // and time, it is okay to do extra checks here. This takes negligible time + // anyway. 
+ if let PrepareJobKind::Prechecking = prepare_job_kind { + result = result.and_then(|output| { + runtime_construction_check(output.0.as_ref(), &executor_params)?; + Ok(output) + }); + } + result + }, + Arc::clone(&condvar), + WaitOutcome::Finished, + ) + .unwrap_or_else(|err| { + send_child_response(&mut pipe_write, Err(PrepareError::IoErr(err.to_string()))) + }); + + let outcome = thread::wait_for_threads(condvar); + + let peak_alloc = { + let peak = end_memory_tracking(); + gum::debug!( + target: LOG_TARGET, + %worker_job_pid, + "prepare job peak allocation is {} bytes", + peak, + ); + peak + }; + + let result = match outcome { + WaitOutcome::Finished => { + let _ = cpu_time_monitor_tx.send(()); + + match prepare_thread.join().unwrap_or_else(|err| { + send_child_response( + &mut pipe_write, + Err(PrepareError::JobError(stringify_panic_payload(err))), + ) + }) { + Err(err) => Err(err), + Ok(ok) => { + cfg_if::cfg_if! { + if #[cfg(target_os = "linux")] { + let (artifact, max_rss) = ok; + } else { + let artifact = ok; + } + } + + // Stop the memory stats worker and get its observed memory stats. + #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] + let memory_tracker_stats = get_memory_tracker_loop_stats(memory_tracker_thread, process::id()); + + let memory_stats = MemoryStats { + #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] + memory_tracker_stats, + #[cfg(target_os = "linux")] + max_rss: extract_max_rss_stat(max_rss, process::id()), + // Negative peak allocation values are legit; they are narrow + // corner cases and shouldn't affect overall statistics + // significantly + peak_tracked_alloc: if peak_alloc > 0 { peak_alloc as u64 } else { 0u64 }, + }; + + Ok(JobResponse { artifact, memory_stats }) + }, + } + }, + + // If the CPU thread is not selected, we signal it to end, the join handle is + // dropped and the thread will finish in the background. + WaitOutcome::TimedOut => match cpu_time_monitor_thread.join() { + Ok(Some(_cpu_time_elapsed)) => Err(PrepareError::TimedOut), + Ok(None) => Err(PrepareError::IoErr("error communicating over closed channel".into())), + Err(err) => Err(PrepareError::IoErr(stringify_panic_payload(err))), + }, + WaitOutcome::Pending => + unreachable!("we run wait_while until the outcome is no longer pending; qed"), + }; + + send_child_response(&mut pipe_write, result); +} + +/// Waits for child process to finish and handle child response from pipe. +/// +/// # Arguments +/// +/// - `pipe_read`: A `PipeReader` used to read data from the child process. +/// +/// - `child`: The child pid. +/// +/// - `temp_artifact_dest`: The destination `PathBuf` to write the temporary artifact file. +/// +/// - `worker_pid`: The PID of the child process. +/// +/// - `usage_before`: Resource usage statistics before executing the child process. +/// +/// - `timeout`: The maximum allowed time for the child process to finish, in `Duration`. +/// +/// # Returns +/// +/// - If the child send response without an error, this function returns `Ok(PrepareStats)` +/// containing memory and CPU usage statistics. +/// +/// - If the child send response with an error, it returns a `PrepareError` with that error. +/// +/// - If the child process timeout, it returns `PrepareError::TimedOut`. +fn handle_parent_process( + mut pipe_read: PipeReader, + child: Pid, + temp_artifact_dest: PathBuf, + worker_pid: u32, + usage_before: Usage, + timeout: Duration, +) -> Result { + // Read from the child. Don't decode unless the process exited normally, which we check later. 
+ let mut received_data = Vec::new(); + pipe_read + .read_to_end(&mut received_data) + .map_err(|err| PrepareError::IoErr(err.to_string()))?; + + let status = nix::sys::wait::waitpid(child, None); + gum::trace!( + target: LOG_TARGET, + %worker_pid, + "prepare worker received wait status from job: {:?}", + status, + ); + + let usage_after = nix::sys::resource::getrusage(UsageWho::RUSAGE_CHILDREN) + .map_err(|errno| error_from_errno("getrusage after", errno))?; + + // Using `getrusage` is needed to check whether child has timedout since we cannot rely on + // child to report its own time. + // As `getrusage` returns resource usage from all terminated child processes, + // it is necessary to subtract the usage before the current child process to isolate its cpu + // time + let cpu_tv = get_total_cpu_usage(usage_after) - get_total_cpu_usage(usage_before); + if cpu_tv >= timeout { + gum::warn!( + target: LOG_TARGET, + %worker_pid, + "prepare job took {}ms cpu time, exceeded prepare timeout {}ms", + cpu_tv.as_millis(), + timeout.as_millis(), + ); + return Err(PrepareError::TimedOut) + } + + match status { + Ok(WaitStatus::Exited(_pid, exit_status)) => { + let mut reader = io::BufReader::new(received_data.as_slice()); + let result = recv_child_response(&mut reader) + .map_err(|err| PrepareError::JobError(err.to_string()))?; + + match result { + Err(err) => Err(err), + Ok(response) => { + // The exit status should have been zero if no error occurred. + if exit_status != 0 { + return Err(PrepareError::JobError(format!( + "unexpected exit status: {}", + exit_status + ))) + } + + // Write the serialized artifact into a temp file. + // + // PVF host only keeps artifacts statuses in its memory, + // successfully compiled code gets stored on the disk (and + // consequently deserialized by execute-workers). The prepare worker + // is only required to send `Ok` to the pool to indicate the + // success. + gum::debug!( + target: LOG_TARGET, + %worker_pid, + "worker: writing artifact to {}", + temp_artifact_dest.display(), + ); + // Write to the temp file created by the host. + if let Err(err) = fs::write(&temp_artifact_dest, &response.artifact) { + return Err(PrepareError::IoErr(err.to_string())) + }; + + Ok(PrepareStats { + memory_stats: response.memory_stats, + cpu_time_elapsed: cpu_tv, + }) + }, + } + }, + // The job was killed by the given signal. + // + // The job gets SIGSYS on seccomp violations, but this signal may have been sent for some + // other reason, so we still need to check for seccomp violations elsewhere. + Ok(WaitStatus::Signaled(_pid, signal, _core_dump)) => + Err(PrepareError::JobDied(format!("received signal: {signal:?}"))), + Err(errno) => Err(error_from_errno("waitpid", errno)), + + // An attacker can make the child process return any exit status it wants. So we can treat + // all unexpected cases the same way. + Ok(unexpected_wait_status) => Err(PrepareError::JobDied(format!( + "unexpected status from wait: {unexpected_wait_status:?}" + ))), + } +} + +/// Calculate the total CPU time from the given `usage` structure, returned from +/// [`nix::sys::resource::getrusage`], and calculates the total CPU time spent, including both user +/// and system time. +/// +/// # Arguments +/// +/// - `rusage`: Contains resource usage information. +/// +/// # Returns +/// +/// Returns a `Duration` representing the total CPU time. 
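A standalone check of the arithmetic performed by the helper that follows may be useful: total CPU time is user time plus system time, with the `timeval` seconds and microseconds folded into a single `Duration`, and the parent isolates the child's share by subtracting the snapshot taken before the fork. The figures below are invented for illustration.

```rust
use std::time::Duration;

fn main() {
    // Hypothetical readings: user time 1.250000 s, system time 0.500000 s.
    let (user_sec, user_usec): (i64, i64) = (1, 250_000);
    let (sys_sec, sys_usec): (i64, i64) = (0, 500_000);

    // Same formula as `get_total_cpu_usage`: whole seconds to micros, plus micros.
    let micros = (((user_sec + sys_sec) * 1_000_000) + (user_usec + sys_usec)) as u64;
    let total = Duration::from_micros(micros);
    assert_eq!(total, Duration::from_micros(1_750_000));

    // `RUSAGE_CHILDREN` is cumulative over all reaped children, so the usage
    // recorded before spawning this child is subtracted to isolate it.
    let before = Duration::from_micros(400_000);
    let after = before + total;
    assert_eq!(after - before, total);
}
```

The subtraction only isolates the new child correctly because the same parent reads `getrusage(RUSAGE_CHILDREN)` both before the fork and after the wait.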
+fn get_total_cpu_usage(rusage: Usage) -> Duration { + let micros = (((rusage.user_time().tv_sec() + rusage.system_time().tv_sec()) * 1_000_000) + + (rusage.system_time().tv_usec() + rusage.user_time().tv_usec()) as i64) as u64; + + return Duration::from_micros(micros) +} + +/// Get a job response. +fn recv_child_response(received_data: &mut io::BufReader<&[u8]>) -> io::Result { + let response_bytes = framed_recv_blocking(received_data)?; + JobResult::decode(&mut response_bytes.as_slice()).map_err(|e| { + io::Error::new( + io::ErrorKind::Other, + format!("prepare pvf recv_child_response: decode error: {:?}", e), + ) + }) +} + +/// Write a job response to the pipe and exit process after. +/// +/// # Arguments +/// +/// - `pipe_write`: A `PipeWriter` structure, the writing end of a pipe. +/// +/// - `response`: Child process response +fn send_child_response(pipe_write: &mut PipeWriter, response: JobResult) -> ! { + framed_send_blocking(pipe_write, response.encode().as_slice()) + .unwrap_or_else(|_| process::exit(libc::EXIT_FAILURE)); + + if response.is_ok() { + process::exit(libc::EXIT_SUCCESS) + } else { + process::exit(libc::EXIT_FAILURE) + } +} + +fn error_from_errno(context: &'static str, errno: Errno) -> PrepareError { + PrepareError::Kernel(format!("{}: {}: {}", context, errno, io::Error::last_os_error())) +} + +type JobResult = Result; + +/// Pre-encoded length-prefixed `Result::Err(PrepareError::OutOfMemory)` +const OOM_PAYLOAD: &[u8] = b"\x02\x00\x00\x00\x00\x00\x00\x00\x01\x08"; + +#[test] +fn pre_encoded_payloads() { + // NOTE: This must match the type of `response` in `send_child_response`. + let oom_unencoded: JobResult = Result::Err(PrepareError::OutOfMemory); + let oom_encoded = oom_unencoded.encode(); + // The payload is prefixed with its length in `framed_send`. + let mut oom_payload = oom_encoded.len().to_le_bytes().to_vec(); + oom_payload.extend(oom_encoded); + assert_eq!(oom_payload, OOM_PAYLOAD); +} diff --git a/polkadot/node/core/pvf/src/error.rs b/polkadot/node/core/pvf/src/error.rs index 87ef0b54a04..7fdb8c56ec9 100644 --- a/polkadot/node/core/pvf/src/error.rs +++ b/polkadot/node/core/pvf/src/error.rs @@ -33,36 +33,37 @@ pub enum ValidationError { pub enum InvalidCandidate { /// PVF preparation ended up with a deterministic error. PrepareError(String), - /// The failure is reported by the execution worker. The string contains the error message. - WorkerReportedError(String), - /// The worker has died during validation of a candidate. That may fall in one of the following - /// categories, which we cannot distinguish programmatically: + /// The candidate is reported to be invalid by the execution worker. The string contains the + /// error message. + WorkerReportedInvalid(String), + /// The worker process (not the job) has died during validation of a candidate. /// - /// (a) Some sort of transient glitch caused the worker process to abort. An example would be - /// that the host machine ran out of free memory and the OOM killer started killing the - /// processes, and in order to save the parent it will "sacrifice child" first. - /// - /// (b) The candidate triggered a code path that has lead to the process death. For example, - /// the PVF found a way to consume unbounded amount of resources and then it either - /// exceeded an `rlimit` (if set) or, again, invited OOM killer. Another possibility is a - /// bug in wasmtime allowed the PVF to gain control over the execution worker. - /// - /// We attribute such an event to an *invalid candidate* in either case. 
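Looking back at the `OOM_PAYLOAD` constant and its `pre_encoded_payloads` test a few hunks up: the constant is simply the framed SCALE encoding of `Err(PrepareError::OutOfMemory)`, spelled out by hand so the out-of-memory handler can emit it without allocating. A sketch of the byte layout, assuming a 64-bit target where the `usize` length prefix occupies eight bytes:

```rust
fn main() {
    // SCALE encoding of `Err(OutOfMemory)`:
    //   0x01 -> the `Err` arm of `Result`,
    //   0x08 -> the variant's codec index (per the `pre_encoded_payloads` test above).
    let encoded: Vec<u8> = vec![0x01, 0x08];

    // The framed send prefixes the payload with its length as little-endian
    // bytes, hence the leading 02 00 00 00 00 00 00 00.
    let mut payload = (encoded.len() as u64).to_le_bytes().to_vec();
    payload.extend(encoded);

    assert_eq!(payload, b"\x02\x00\x00\x00\x00\x00\x00\x00\x01\x08".to_vec());
}
```

Keeping the bytes pre-encoded matters because the handler runs under memory pressure and only issues raw `write`/`close`/`exit_group` syscalls, so nothing on that path may allocate.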
- /// - /// The rationale for this is that a glitch may lead to unfair rejecting candidate by a single - /// validator. If the glitch is somewhat more persistent the validator will reject all - /// candidate thrown at it and hopefully the operator notices it by decreased reward - /// performance of the validator. On the other hand, if the worker died because of (b) we would - /// have better chances to stop the attack. + /// It's unlikely that this is caused by malicious code since workers spawn separate job + /// processes, and those job processes are sandboxed. But, it is possible. We retry in this + /// case, and if the error persists, we assume it's caused by the candidate and vote against. AmbiguousWorkerDeath, /// PVF execution (compilation is not included) took more time than was allotted. HardTimeout, - /// A panic occurred and we can't be sure whether the candidate is really invalid or some - /// internal glitch occurred. Whenever we are unsure, we can never treat an error as internal - /// as we would abstain from voting. This is bad because if the issue was due to the candidate, - /// then all validators would abstain, stalling finality on the chain. So we will first retry - /// the candidate, and if the issue persists we are forced to vote invalid. - Panic(String), + /// The job process (not the worker) has died for one of the following reasons: + /// + /// (a) A seccomp violation occurred, most likely due to an attempt by malicious code to + /// execute arbitrary code. Note that there is no foolproof way to detect this if the operator + /// has seccomp auditing disabled. + /// + /// (b) The host machine ran out of free memory and the OOM killer started killing the + /// processes, and in order to save the parent it will "sacrifice child" first. + /// + /// (c) Some other reason, perhaps transient or perhaps caused by malicious code. + /// + /// We cannot treat this as an internal error because malicious code may have caused this. + AmbiguousJobDeath(String), + /// An unexpected error occurred in the job process and we can't be sure whether the candidate + /// is really invalid or some internal glitch occurred. Whenever we are unsure, we can never + /// treat an error as internal as we would abstain from voting. This is bad because if the + /// issue was due to the candidate, then all validators would abstain, stalling finality on the + /// chain. So we will first retry the candidate, and if the issue persists we are forced to + /// vote invalid. + JobError(String), } impl From for ValidationError { diff --git a/polkadot/node/core/pvf/src/execute/queue.rs b/polkadot/node/core/pvf/src/execute/queue.rs index aca604f0de2..257377df3f4 100644 --- a/polkadot/node/core/pvf/src/execute/queue.rs +++ b/polkadot/node/core/pvf/src/execute/queue.rs @@ -342,20 +342,27 @@ fn handle_job_finish( }, Outcome::InvalidCandidate { err, idle_worker } => ( Some(idle_worker), - Err(ValidationError::InvalidCandidate(InvalidCandidate::WorkerReportedError(err))), + Err(ValidationError::InvalidCandidate(InvalidCandidate::WorkerReportedInvalid(err))), None, ), Outcome::InternalError { err } => (None, Err(ValidationError::InternalError(err)), None), + // Either the worker or the job timed out. Kill the worker in either case. Treated as + // definitely-invalid, because if we timed out, there's no time left for a retry. Outcome::HardTimeout => (None, Err(ValidationError::InvalidCandidate(InvalidCandidate::HardTimeout)), None), // "Maybe invalid" errors (will retry). 
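The updated variants above split into outcomes treated as invalid immediately and "maybe invalid" outcomes that are retried once before voting. A schematic, standalone sketch of that classification as the doc comments describe it (the enum here is a local stand-in; the real retry logic lives in the candidate-validation layer and may differ):

```rust
// Local stand-in mirroring the variants documented above; NOT the real type.
#[allow(dead_code)]
#[derive(Debug)]
enum InvalidCandidate {
    PrepareError(String),
    WorkerReportedInvalid(String),
    AmbiguousWorkerDeath,
    HardTimeout,
    AmbiguousJobDeath(String),
    JobError(String),
}

/// Whether, per the doc comments above, the error is retried once before we
/// conclude the candidate is invalid.
fn retried_before_voting(err: &InvalidCandidate) -> bool {
    match err {
        // Ambiguous worker/job deaths and job errors may be transient glitches,
        // so they get one retry and are only held against the candidate if they
        // persist.
        InvalidCandidate::AmbiguousWorkerDeath |
        InvalidCandidate::AmbiguousJobDeath(_) |
        InvalidCandidate::JobError(_) => true,
        // Deterministic preparation errors, worker-reported invalidity and
        // hard timeouts are treated as invalid right away.
        InvalidCandidate::PrepareError(_) |
        InvalidCandidate::WorkerReportedInvalid(_) |
        InvalidCandidate::HardTimeout => false,
    }
}

fn main() {
    assert!(retried_before_voting(&InvalidCandidate::AmbiguousWorkerDeath));
    assert!(!retried_before_voting(&InvalidCandidate::HardTimeout));
    println!("ok");
}
```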
- Outcome::IoErr => ( + Outcome::WorkerIntfErr => ( None, Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath)), None, ), - Outcome::Panic { err } => - (None, Err(ValidationError::InvalidCandidate(InvalidCandidate::Panic(err))), None), + Outcome::JobDied { err } => ( + None, + Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousJobDeath(err))), + None, + ), + Outcome::JobError { err } => + (None, Err(ValidationError::InvalidCandidate(InvalidCandidate::JobError(err))), None), }; queue.metrics.execute_finished(); diff --git a/polkadot/node/core/pvf/src/execute/worker_intf.rs b/polkadot/node/core/pvf/src/execute/worker_intf.rs index 61264f7d517..bf44ba01725 100644 --- a/polkadot/node/core/pvf/src/execute/worker_intf.rs +++ b/polkadot/node/core/pvf/src/execute/worker_intf.rs @@ -30,7 +30,7 @@ use futures_timer::Delay; use parity_scale_codec::{Decode, Encode}; use polkadot_node_core_pvf_common::{ error::InternalValidationError, - execute::{Handshake, Response}, + execute::{Handshake, WorkerResponse}, worker_dir, SecurityStatus, }; use polkadot_parachain_primitives::primitives::ValidationResult; @@ -88,19 +88,26 @@ pub enum Outcome { /// a trap. Errors related to the preparation process are not expected to be encountered by the /// execution workers. InvalidCandidate { err: String, idle_worker: IdleWorker }, + /// The execution time exceeded the hard limit. The worker is terminated. + HardTimeout, + /// An I/O error happened during communication with the worker. This may mean that the worker + /// process already died. The token is not returned in any case. + WorkerIntfErr, + /// The job process has died. We must kill the worker just in case. + /// + /// We cannot treat this as an internal error because malicious code may have caused this. + JobDied { err: String }, + /// An unexpected error occurred in the job process. + /// + /// Because malicious code can cause a job error, we must not treat it as an internal error. + JobError { err: String }, + /// An internal error happened during the validation. Such an error is most likely related to /// some transient glitch. /// /// Should only ever be used for errors independent of the candidate and PVF. Therefore it may /// be a problem with the worker, so we terminate it. InternalError { err: InternalValidationError }, - /// The execution time exceeded the hard limit. The worker is terminated. - HardTimeout, - /// An I/O error happened during communication with the worker. This may mean that the worker - /// process already died. The token is not returned in any case. - IoErr, - /// An unexpected panic has occurred in the execution worker. - Panic { err: String }, } /// Given the idle token of a worker and parameters of work, communicates with the worker and @@ -137,7 +144,7 @@ pub async fn start_work( ?error, "failed to send an execute request", ); - return Outcome::IoErr + return Outcome::WorkerIntfErr } // We use a generous timeout here. This is in addition to the one in the child process, in @@ -173,7 +180,7 @@ pub async fn start_work( ); } - return Outcome::IoErr + return Outcome::WorkerIntfErr }, Ok(response) => { // Check if any syscall violations occurred during the job. For now this is @@ -189,7 +196,7 @@ pub async fn start_work( ); } - if let Response::Ok{duration, ..} = response { + if let WorkerResponse::Ok{duration, ..} = response { if duration > execution_timeout { // The job didn't complete within the timeout. gum::warn!( @@ -201,7 +208,7 @@ pub async fn start_work( ); // Return a timeout error. 
- return Outcome::HardTimeout; + return Outcome::HardTimeout } } @@ -216,23 +223,25 @@ pub async fn start_work( validation_code_hash = ?artifact.id.code_hash, "execution worker exceeded lenient timeout for execution, child worker likely stalled", ); - Response::TimedOut + WorkerResponse::JobTimedOut }, }; match response { - Response::Ok { result_descriptor, duration } => Outcome::Ok { + WorkerResponse::Ok { result_descriptor, duration } => Outcome::Ok { result_descriptor, duration, idle_worker: IdleWorker { stream, pid, worker_dir }, }, - Response::InvalidCandidate(err) => Outcome::InvalidCandidate { + WorkerResponse::InvalidCandidate(err) => Outcome::InvalidCandidate { err, idle_worker: IdleWorker { stream, pid, worker_dir }, }, - Response::TimedOut => Outcome::HardTimeout, - Response::Panic(err) => Outcome::Panic { err }, - Response::InternalError(err) => Outcome::InternalError { err }, + WorkerResponse::JobTimedOut => Outcome::HardTimeout, + WorkerResponse::JobDied(err) => Outcome::JobDied { err }, + WorkerResponse::JobError(err) => Outcome::JobError { err }, + + WorkerResponse::InternalError(err) => Outcome::InternalError { err }, } }) .await @@ -306,9 +315,9 @@ async fn send_request( framed_send(stream, &execution_timeout.encode()).await } -async fn recv_response(stream: &mut UnixStream) -> io::Result { +async fn recv_response(stream: &mut UnixStream) -> io::Result { let response_bytes = framed_recv(stream).await?; - Response::decode(&mut &response_bytes[..]).map_err(|e| { + WorkerResponse::decode(&mut response_bytes.as_slice()).map_err(|e| { io::Error::new( io::ErrorKind::Other, format!("execute pvf recv_response: decode error: {:?}", e), diff --git a/polkadot/node/core/pvf/src/prepare/pool.rs b/polkadot/node/core/pvf/src/prepare/pool.rs index 6bb6ca5b644..8e02f540d32 100644 --- a/polkadot/node/core/pvf/src/prepare/pool.rs +++ b/polkadot/node/core/pvf/src/prepare/pool.rs @@ -339,17 +339,17 @@ fn handle_mux( spawned, worker, idle, - Err(PrepareError::CreateTmpFileErr(err)), + Err(PrepareError::CreateTmpFile(err)), ), // Return `Concluded`, but do not kill the worker since the error was on the host // side. - Outcome::RenameTmpFileErr { worker: idle, result: _, err, src, dest } => + Outcome::RenameTmpFile { worker: idle, result: _, err, src, dest } => handle_concluded_no_rip( from_pool, spawned, worker, idle, - Err(PrepareError::RenameTmpFileErr { err, src, dest }), + Err(PrepareError::RenameTmpFile { err, src, dest }), ), // Could not clear worker cache. Kill the worker so other jobs can't see the data. Outcome::ClearWorkerDir { err } => { @@ -387,6 +387,21 @@ fn handle_mux( Ok(()) }, + // The worker might still be usable, but we kill it just in case. + Outcome::JobDied(err) => { + if attempt_retire(metrics, spawned, worker) { + reply( + from_pool, + FromPool::Concluded { + worker, + rip: true, + result: Err(PrepareError::JobDied(err)), + }, + )?; + } + + Ok(()) + }, Outcome::TimedOut => { if attempt_retire(metrics, spawned, worker) { reply( diff --git a/polkadot/node/core/pvf/src/prepare/worker_intf.rs b/polkadot/node/core/pvf/src/prepare/worker_intf.rs index 0e50caf1feb..a22fa74b2fe 100644 --- a/polkadot/node/core/pvf/src/prepare/worker_intf.rs +++ b/polkadot/node/core/pvf/src/prepare/worker_intf.rs @@ -79,7 +79,7 @@ pub enum Outcome { CreateTmpFileErr { worker: IdleWorker, err: String }, /// The response from the worker is received, but the tmp file cannot be renamed (moved) to the /// final destination location. 
- RenameTmpFileErr { + RenameTmpFile { worker: IdleWorker, result: PrepareResult, err: String, @@ -100,6 +100,10 @@ pub enum Outcome { IoErr(String), /// The worker ran out of memory and is aborting. The worker should be ripped. OutOfMemory, + /// The preparation job process died, due to OOM, a seccomp violation, or some other factor. + /// + /// The worker might still be usable, but we kill it just in case. + JobDied(String), } /// Given the idle token of a worker and parameters of work, communicates with the worker and @@ -187,21 +191,6 @@ pub async fn start_work( "failed to recv a prepare response: {:?}", err, ); - - // The worker died. Check if it was due to a seccomp violation. - // - // NOTE: Log, but don't change the outcome. Not all validators may have auditing - // enabled, so we don't want attackers to abuse a non-deterministic outcome. - for syscall in security::check_seccomp_violations_for_worker(audit_log_file, pid).await { - gum::error!( - target: LOG_TARGET, - worker_pid = %pid, - %syscall, - ?pvf, - "A forbidden syscall was attempted! This is a violation of our seccomp security policy. Report an issue ASAP!" - ); - } - Outcome::IoErr(err.to_string()) }, Err(_) => { @@ -236,6 +225,7 @@ async fn handle_response( Ok(result) => result, // Timed out on the child. This should already be logged by the child. Err(PrepareError::TimedOut) => return Outcome::TimedOut, + Err(PrepareError::JobDied(err)) => return Outcome::JobDied(err), Err(PrepareError::OutOfMemory) => return Outcome::OutOfMemory, Err(_) => return Outcome::Concluded { worker, result }, }; @@ -272,7 +262,7 @@ async fn handle_response( artifact_path.display(), err, ); - Outcome::RenameTmpFileErr { + Outcome::RenameTmpFile { worker, result, err: format!("{:?}", err), diff --git a/polkadot/node/core/pvf/src/testing.rs b/polkadot/node/core/pvf/src/testing.rs index 4c038896f7f..400b65bfe7d 100644 --- a/polkadot/node/core/pvf/src/testing.rs +++ b/polkadot/node/core/pvf/src/testing.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Various things for testing other crates. +//! Various utilities for testing. pub use crate::{ host::{EXECUTE_BINARY_NAME, PREPARE_BINARY_NAME}, @@ -59,27 +59,33 @@ pub fn validate_candidate( /// /// NOTE: This should only be called in dev code (tests, benchmarks) as it relies on the relative /// paths of the built workers. -pub fn get_and_check_worker_paths() -> (PathBuf, PathBuf) { +pub fn build_workers_and_get_paths(is_bench: bool) -> (PathBuf, PathBuf) { // Only needs to be called once for the current process. static WORKER_PATHS: OnceLock> = OnceLock::new(); - fn build_workers() { - let build_args = vec![ + fn build_workers(is_bench: bool) { + let mut build_args = vec![ "build", "--package=polkadot", "--bin=polkadot-prepare-worker", "--bin=polkadot-execute-worker", ]; - let exit_status = std::process::Command::new("cargo") + if is_bench { + // Benches require --release. Regular tests are debug (no flag needed). 
+ build_args.push("--release"); + } + let mut cargo = std::process::Command::new("cargo"); + let cmd = cargo // wasm runtime not needed .env("SKIP_WASM_BUILD", "1") .args(build_args) - .stdout(std::process::Stdio::piped()) - .status() - .expect("Failed to run the build program"); + .stdout(std::process::Stdio::piped()); + + println!("INFO: calling `{cmd:?}`"); + let exit_status = cmd.status().expect("Failed to run the build program"); if !exit_status.success() { - eprintln!("Failed to build workers: {}", exit_status.code().unwrap()); + eprintln!("ERROR: Failed to build workers: {}", exit_status.code().unwrap()); std::process::exit(1); } } @@ -95,23 +101,23 @@ pub fn get_and_check_worker_paths() -> (PathBuf, PathBuf) { // explain why a build happens if !prepare_worker_path.is_executable() { - eprintln!("Prepare worker does not exist or is not executable. Workers directory: {:?}", workers_path); + println!("WARN: Prepare worker does not exist or is not executable. Workers directory: {:?}", workers_path); } if !execute_worker_path.is_executable() { - eprintln!("Execute worker does not exist or is not executable. Workers directory: {:?}", workers_path); + println!("WARN: Execute worker does not exist or is not executable. Workers directory: {:?}", workers_path); } if let Ok(ver) = get_worker_version(&prepare_worker_path) { if ver != NODE_VERSION { - eprintln!("Prepare worker version {ver} does not match node version {NODE_VERSION}; worker path: {prepare_worker_path:?}"); + println!("WARN: Prepare worker version {ver} does not match node version {NODE_VERSION}; worker path: {prepare_worker_path:?}"); } } if let Ok(ver) = get_worker_version(&execute_worker_path) { if ver != NODE_VERSION { - eprintln!("Execute worker version {ver} does not match node version {NODE_VERSION}; worker path: {execute_worker_path:?}"); + println!("WARN: Execute worker version {ver} does not match node version {NODE_VERSION}; worker path: {execute_worker_path:?}"); } } - build_workers(); + build_workers(is_bench); Mutex::new((prepare_worker_path, execute_worker_path)) }); diff --git a/polkadot/node/core/pvf/tests/it/main.rs b/polkadot/node/core/pvf/tests/it/main.rs index 801b60884fa..d2d842cf84a 100644 --- a/polkadot/node/core/pvf/tests/it/main.rs +++ b/polkadot/node/core/pvf/tests/it/main.rs @@ -19,23 +19,23 @@ use assert_matches::assert_matches; use parity_scale_codec::Encode as _; use polkadot_node_core_pvf::{ - start, testing::get_and_check_worker_paths, Config, InvalidCandidate, Metrics, PrepareError, + start, testing::build_workers_and_get_paths, Config, InvalidCandidate, Metrics, PrepareError, PrepareJobKind, PrepareStats, PvfPrepData, ValidationError, ValidationHost, JOB_TIMEOUT_WALL_CLOCK_FACTOR, }; use polkadot_parachain_primitives::primitives::{BlockData, ValidationParams, ValidationResult}; use polkadot_primitives::{ExecutorParam, ExecutorParams}; -#[cfg(target_os = "linux")] -use rusty_fork::rusty_fork_test; use std::time::Duration; use tokio::sync::Mutex; mod adder; +#[cfg(target_os = "linux")] +mod process; mod worker_common; -const TEST_EXECUTION_TIMEOUT: Duration = Duration::from_secs(3); -const TEST_PREPARATION_TIMEOUT: Duration = Duration::from_secs(3); +const TEST_EXECUTION_TIMEOUT: Duration = Duration::from_secs(6); +const TEST_PREPARATION_TIMEOUT: Duration = Duration::from_secs(6); struct TestHost { cache_dir: tempfile::TempDir, @@ -51,7 +51,7 @@ impl TestHost { where F: FnOnce(&mut Config), { - let (prepare_worker_path, execute_worker_path) = get_and_check_worker_paths(); + let 
(prepare_worker_path, execute_worker_path) = build_workers_and_get_paths(false); let cache_dir = tempfile::tempdir().unwrap(); let mut config = Config::new( @@ -126,7 +126,26 @@ impl TestHost { } #[tokio::test] -async fn terminates_on_timeout() { +async fn prepare_job_terminates_on_timeout() { + let host = TestHost::new().await; + + let start = std::time::Instant::now(); + let result = host + .precheck_pvf(rococo_runtime::WASM_BINARY.unwrap(), Default::default()) + .await; + + match result { + Err(PrepareError::TimedOut) => {}, + r => panic!("{:?}", r), + } + + let duration = std::time::Instant::now().duration_since(start); + assert!(duration >= TEST_PREPARATION_TIMEOUT); + assert!(duration < TEST_PREPARATION_TIMEOUT * JOB_TIMEOUT_WALL_CLOCK_FACTOR); +} + +#[tokio::test] +async fn execute_job_terminates_on_timeout() { let host = TestHost::new().await; let start = std::time::Instant::now(); @@ -153,108 +172,6 @@ async fn terminates_on_timeout() { assert!(duration < TEST_EXECUTION_TIMEOUT * JOB_TIMEOUT_WALL_CLOCK_FACTOR); } -#[cfg(target_os = "linux")] -fn kill_by_sid_and_name(sid: i32, exe_name: &'static str) { - use procfs::process; - - let all_processes: Vec = process::all_processes() - .expect("Can't read /proc") - .filter_map(|p| match p { - Ok(p) => Some(p), // happy path - Err(e) => match e { - // process vanished during iteration, ignore it - procfs::ProcError::NotFound(_) => None, - x => { - panic!("some unknown error: {}", x); - }, - }, - }) - .collect(); - - for process in all_processes { - if process.stat().unwrap().session == sid && - process.exe().unwrap().to_str().unwrap().contains(exe_name) - { - assert_eq!(unsafe { libc::kill(process.pid(), 9) }, 0); - } - } -} - -// Run these tests in their own processes with rusty-fork. They work by each creating a new session, -// then killing the worker process that matches the session ID and expected worker name. -#[cfg(target_os = "linux")] -rusty_fork_test! { - // What happens when the prepare worker dies in the middle of a job? - #[test] - fn prepare_worker_killed_during_job() { - const PROCESS_NAME: &'static str = "polkadot-prepare-worker"; - - let rt = tokio::runtime::Runtime::new().unwrap(); - rt.block_on(async { - let host = TestHost::new().await; - - // Create a new session and get the session ID. - let sid = unsafe { libc::setsid() }; - assert!(sid > 0); - - let (result, _) = futures::join!( - // Choose a job that would normally take the entire timeout. - host.precheck_pvf(rococo_runtime::WASM_BINARY.unwrap(), Default::default()), - // Run a future that kills the job in the middle of the timeout. - async { - tokio::time::sleep(TEST_PREPARATION_TIMEOUT / 2).await; - kill_by_sid_and_name(sid, PROCESS_NAME); - } - ); - - assert_matches!(result, Err(PrepareError::IoErr(_))); - }) - } - - // What happens when the execute worker dies in the middle of a job? - #[test] - fn execute_worker_killed_during_job() { - const PROCESS_NAME: &'static str = "polkadot-execute-worker"; - - let rt = tokio::runtime::Runtime::new().unwrap(); - rt.block_on(async { - let host = TestHost::new().await; - - // Create a new session and get the session ID. - let sid = unsafe { libc::setsid() }; - assert!(sid > 0); - - // Prepare the artifact ahead of time. - let binary = halt::wasm_binary_unwrap(); - host.precheck_pvf(binary, Default::default()).await.unwrap(); - - let (result, _) = futures::join!( - // Choose an job that would normally take the entire timeout. 
- host.validate_candidate( - binary, - ValidationParams { - block_data: BlockData(Vec::new()), - parent_head: Default::default(), - relay_parent_number: 1, - relay_parent_storage_root: Default::default(), - }, - Default::default(), - ), - // Run a future that kills the job in the middle of the timeout. - async { - tokio::time::sleep(TEST_EXECUTION_TIMEOUT / 2).await; - kill_by_sid_and_name(sid, PROCESS_NAME); - } - ); - - assert_matches!( - result, - Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath)) - ); - }) - } -} - #[cfg(feature = "ci-only-tests")] #[tokio::test] async fn ensure_parallel_execution() { diff --git a/polkadot/node/core/pvf/tests/it/process.rs b/polkadot/node/core/pvf/tests/it/process.rs new file mode 100644 index 00000000000..725d060ab91 --- /dev/null +++ b/polkadot/node/core/pvf/tests/it/process.rs @@ -0,0 +1,383 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Test unexpected behaviors of the spawned processes. We test both worker processes (directly +//! spawned by the host) and job processes (spawned by the workers to securely perform PVF jobs). + +use super::TestHost; +use assert_matches::assert_matches; +use polkadot_node_core_pvf::{InvalidCandidate, PrepareError, ValidationError}; +use polkadot_parachain_primitives::primitives::{BlockData, ValidationParams}; +use procfs::process; +use rusty_fork::rusty_fork_test; +use std::time::Duration; + +const PREPARE_PROCESS_NAME: &'static str = "polkadot-prepare-worker"; +const EXECUTE_PROCESS_NAME: &'static str = "polkadot-execute-worker"; + +const SIGNAL_KILL: i32 = 9; +const SIGNAL_STOP: i32 = 19; + +fn send_signal_by_sid_and_name( + sid: i32, + exe_name: &'static str, + is_direct_child: bool, + signal: i32, +) { + let process = find_process_by_sid_and_name(sid, exe_name, is_direct_child); + assert_eq!(unsafe { libc::kill(process.pid(), signal) }, 0); +} +fn get_num_threads_by_sid_and_name(sid: i32, exe_name: &'static str, is_direct_child: bool) -> i64 { + let process = find_process_by_sid_and_name(sid, exe_name, is_direct_child); + process.stat().unwrap().num_threads +} + +fn find_process_by_sid_and_name( + sid: i32, + exe_name: &'static str, + is_direct_child: bool, +) -> process::Process { + let all_processes: Vec = process::all_processes() + .expect("Can't read /proc") + .filter_map(|p| match p { + Ok(p) => Some(p), // happy path + Err(e) => match e { + // process vanished during iteration, ignore it + procfs::ProcError::NotFound(_) => None, + x => { + panic!("some unknown error: {}", x); + }, + }, + }) + .collect(); + + let mut found = None; + for process in all_processes { + let stat = process.stat().unwrap(); + + if stat.session != sid || !process.exe().unwrap().to_str().unwrap().contains(exe_name) { + continue + } + // The workers are direct children of the current process, the worker job processes are not + // (they are children of the 
workers). + let process_is_direct_child = stat.ppid as u32 == std::process::id(); + if is_direct_child != process_is_direct_child { + continue + } + + if found.is_some() { + panic!("Found more than one process") + } + found = Some(process); + } + found.expect("Should have found the expected process") +} + +// Run these tests in their own processes with rusty-fork. They work by each creating a new session, +// then doing something with the child process that matches the session ID and expected process +// name. +rusty_fork_test! { + // What happens when the prepare worker (not the job) times out? + #[test] + fn prepare_worker_timeout() { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let host = TestHost::new().await; + + // Create a new session and get the session ID. + let sid = unsafe { libc::setsid() }; + assert!(sid > 0); + + let (result, _) = futures::join!( + // Choose a job that would normally take the entire timeout. + host.precheck_pvf(rococo_runtime::WASM_BINARY.unwrap(), Default::default()), + // Send a stop signal to pause the worker. + async { + tokio::time::sleep(Duration::from_secs(1)).await; + send_signal_by_sid_and_name(sid, PREPARE_PROCESS_NAME, true, SIGNAL_STOP); + } + ); + + assert_matches!(result, Err(PrepareError::TimedOut)); + }) + } + + // What happens when the execute worker (not the job) times out? + #[test] + fn execute_worker_timeout() { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let host = TestHost::new().await; + + // Create a new session and get the session ID. + let sid = unsafe { libc::setsid() }; + assert!(sid > 0); + + // Prepare the artifact ahead of time. + let binary = halt::wasm_binary_unwrap(); + host.precheck_pvf(binary, Default::default()).await.unwrap(); + + let (result, _) = futures::join!( + // Choose an job that would normally take the entire timeout. + host.validate_candidate( + binary, + ValidationParams { + block_data: BlockData(Vec::new()), + parent_head: Default::default(), + relay_parent_number: 1, + relay_parent_storage_root: Default::default(), + }, + Default::default(), + ), + // Send a stop signal to pause the worker. + async { + tokio::time::sleep(Duration::from_secs(1)).await; + send_signal_by_sid_and_name(sid, EXECUTE_PROCESS_NAME, true, SIGNAL_STOP); + } + ); + + assert_matches!( + result, + Err(ValidationError::InvalidCandidate(InvalidCandidate::HardTimeout)) + ); + }) + } + + // What happens when the prepare worker dies in the middle of a job? + #[test] + fn prepare_worker_killed_during_job() { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let host = TestHost::new().await; + + // Create a new session and get the session ID. + let sid = unsafe { libc::setsid() }; + assert!(sid > 0); + + let (result, _) = futures::join!( + // Choose a job that would normally take the entire timeout. + host.precheck_pvf(rococo_runtime::WASM_BINARY.unwrap(), Default::default()), + // Run a future that kills the job while it's running. + async { + tokio::time::sleep(Duration::from_secs(1)).await; + send_signal_by_sid_and_name(sid, PREPARE_PROCESS_NAME, true, SIGNAL_KILL); + } + ); + + assert_matches!(result, Err(PrepareError::IoErr(_))); + }) + } + + // What happens when the execute worker dies in the middle of a job? + #[test] + fn execute_worker_killed_during_job() { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let host = TestHost::new().await; + + // Create a new session and get the session ID. 
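The helpers above locate worker and job processes by creating a fresh session with `setsid()` and then scanning `/proc` for processes whose session ID and executable name match. A standalone, Linux-only sketch of that lookup (assumes the `procfs` and `libc` crates; simplified relative to the test helpers):

```rust
use procfs::process::{self, Process};

// Collect processes in the given session whose executable path contains `exe_contains`.
fn processes_in_session(sid: i32, exe_contains: &str) -> Vec<Process> {
    process::all_processes()
        .expect("can't read /proc")
        // A process may vanish while we iterate; just skip it.
        .filter_map(Result::ok)
        .filter(|p| {
            let Ok(stat) = p.stat() else { return false };
            let Ok(exe) = p.exe() else { return false };
            stat.session == sid && exe.to_string_lossy().contains(exe_contains)
        })
        .collect()
}

fn main() {
    // Without a preceding setsid() this is usually the shell's session, so the
    // current process itself should show up.
    let sid = unsafe { libc::getsid(0) };
    println!("{} process(es) in session {sid}", processes_in_session(sid, "").len());
}
```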
+ let sid = unsafe { libc::setsid() }; + assert!(sid > 0); + + // Prepare the artifact ahead of time. + let binary = halt::wasm_binary_unwrap(); + host.precheck_pvf(binary, Default::default()).await.unwrap(); + + let (result, _) = futures::join!( + // Choose an job that would normally take the entire timeout. + host.validate_candidate( + binary, + ValidationParams { + block_data: BlockData(Vec::new()), + parent_head: Default::default(), + relay_parent_number: 1, + relay_parent_storage_root: Default::default(), + }, + Default::default(), + ), + // Run a future that kills the job while it's running. + async { + tokio::time::sleep(Duration::from_secs(1)).await; + send_signal_by_sid_and_name(sid, EXECUTE_PROCESS_NAME, true, SIGNAL_KILL); + } + ); + + assert_matches!( + result, + Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath)) + ); + }) + } + + // What happens when the forked prepare job dies in the middle of its job? + #[test] + fn forked_prepare_job_killed_during_job() { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let host = TestHost::new().await; + + // Create a new session and get the session ID. + let sid = unsafe { libc::setsid() }; + assert!(sid > 0); + + let (result, _) = futures::join!( + // Choose a job that would normally take the entire timeout. + host.precheck_pvf(rococo_runtime::WASM_BINARY.unwrap(), Default::default()), + // Run a future that kills the job while it's running. + async { + tokio::time::sleep(Duration::from_secs(1)).await; + send_signal_by_sid_and_name(sid, PREPARE_PROCESS_NAME, false, SIGNAL_KILL); + } + ); + + // Note that we get a more specific error if the job died than if the whole worker died. + assert_matches!( + result, + Err(PrepareError::JobDied(err)) if err == "received signal: SIGKILL" + ); + }) + } + + // What happens when the forked execute job dies in the middle of its job? + #[test] + fn forked_execute_job_killed_during_job() { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let host = TestHost::new().await; + + // Create a new session and get the session ID. + let sid = unsafe { libc::setsid() }; + assert!(sid > 0); + + // Prepare the artifact ahead of time. + let binary = halt::wasm_binary_unwrap(); + host.precheck_pvf(binary, Default::default()).await.unwrap(); + + let (result, _) = futures::join!( + // Choose a job that would normally take the entire timeout. + host.validate_candidate( + binary, + ValidationParams { + block_data: BlockData(Vec::new()), + parent_head: Default::default(), + relay_parent_number: 1, + relay_parent_storage_root: Default::default(), + }, + Default::default(), + ), + // Run a future that kills the job while it's running. + async { + tokio::time::sleep(Duration::from_secs(1)).await; + send_signal_by_sid_and_name(sid, EXECUTE_PROCESS_NAME, false, SIGNAL_KILL); + } + ); + + // Note that we get a more specific error if the job died than if the whole worker died. + assert_matches!( + result, + Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousJobDeath(err))) + if err == "received signal: SIGKILL" + ); + }) + } + + // Ensure that the spawned prepare worker is single-threaded. + // + // See `run_worker` for why we need this invariant. + #[test] + fn ensure_prepare_processes_have_correct_num_threads() { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let host = TestHost::new().await; + + // Create a new session and get the session ID. 
+ let sid = unsafe { libc::setsid() }; + assert!(sid > 0); + + let _ = futures::join!( + // Choose a job that would normally take the entire timeout. + host.precheck_pvf(rococo_runtime::WASM_BINARY.unwrap(), Default::default()), + // Run a future that kills the job while it's running. + async { + tokio::time::sleep(Duration::from_secs(1)).await; + assert_eq!( + get_num_threads_by_sid_and_name(sid, PREPARE_PROCESS_NAME, true), + 1 + ); + // Child job should have three threads: main thread, execute thread, CPU time + // monitor, and memory tracking. + assert_eq!( + get_num_threads_by_sid_and_name(sid, PREPARE_PROCESS_NAME, false), + 4 + ); + + // End the test. + send_signal_by_sid_and_name(sid, PREPARE_PROCESS_NAME, true, SIGNAL_KILL); + } + ); + }) + } + + // Ensure that the spawned execute worker is single-threaded. + // + // See `run_worker` for why we need this invariant. + #[test] + fn ensure_execute_processes_have_correct_num_threads() { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let host = TestHost::new().await; + + // Create a new session and get the session ID. + let sid = unsafe { libc::setsid() }; + assert!(sid > 0); + + // Prepare the artifact ahead of time. + let binary = halt::wasm_binary_unwrap(); + host.precheck_pvf(binary, Default::default()).await.unwrap(); + + let _ = futures::join!( + // Choose a job that would normally take the entire timeout. + host.validate_candidate( + binary, + ValidationParams { + block_data: BlockData(Vec::new()), + parent_head: Default::default(), + relay_parent_number: 1, + relay_parent_storage_root: Default::default(), + }, + Default::default(), + ), + // Run a future that tests the thread count while the worker is running. + async { + tokio::time::sleep(Duration::from_secs(1)).await; + assert_eq!( + get_num_threads_by_sid_and_name(sid, EXECUTE_PROCESS_NAME, true), + 1 + ); + // Child job should have three threads: main thread, execute thread, and CPU + // time monitor. + assert_eq!( + get_num_threads_by_sid_and_name(sid, EXECUTE_PROCESS_NAME, false), + 3 + ); + + // End the test. + send_signal_by_sid_and_name(sid, EXECUTE_PROCESS_NAME, true, SIGNAL_KILL); + } + ); + }) + } +} diff --git a/polkadot/node/core/pvf/tests/it/worker_common.rs b/polkadot/node/core/pvf/tests/it/worker_common.rs index df64980dc80..0d33af7e096 100644 --- a/polkadot/node/core/pvf/tests/it/worker_common.rs +++ b/polkadot/node/core/pvf/tests/it/worker_common.rs @@ -15,7 +15,7 @@ // along with Polkadot. If not, see . use polkadot_node_core_pvf::{ - testing::{get_and_check_worker_paths, spawn_with_program_path, SpawnErr}, + testing::{build_workers_and_get_paths, spawn_with_program_path, SpawnErr}, SecurityStatus, }; use std::{env, time::Duration}; @@ -23,7 +23,7 @@ use std::{env, time::Duration}; // Test spawning a program that immediately exits with a failure code. 
#[tokio::test] async fn spawn_immediate_exit() { - let (prepare_worker_path, _) = get_and_check_worker_paths(); + let (prepare_worker_path, _) = build_workers_and_get_paths(false); // There's no explicit `exit` subcommand in the worker; it will panic on an unknown // subcommand anyway @@ -41,7 +41,7 @@ async fn spawn_immediate_exit() { #[tokio::test] async fn spawn_timeout() { - let (_, execute_worker_path) = get_and_check_worker_paths(); + let (_, execute_worker_path) = build_workers_and_get_paths(false); let result = spawn_with_program_path( "integration-test", @@ -57,7 +57,7 @@ async fn spawn_timeout() { #[tokio::test] async fn should_connect() { - let (prepare_worker_path, _) = get_and_check_worker_paths(); + let (prepare_worker_path, _) = build_workers_and_get_paths(false); let _ = spawn_with_program_path( "integration-test", diff --git a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md index 52129f9eb80..4dbb7980c1b 100644 --- a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md +++ b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md @@ -1,7 +1,11 @@ # PVF Host and Workers The PVF host is responsible for handling requests to prepare and execute PVF -code blobs, which it sends to PVF workers running in their own child processes. +code blobs, which it sends to PVF **workers** running in their own child +processes. + +While the workers are generally long-living, they also spawn one-off secure +**job processes** that perform the jobs. See "Job Processes" section below. This system has two high-levels goals that we will touch on here: *determinism* and *security*. @@ -36,8 +40,11 @@ execution request: not successful. 2. **Artifact missing:** The prepared artifact might have been deleted due to operator error or some bug in the system. -3. **Panic:** The worker thread panicked for some indeterminate reason, which - may or may not be independent of the candidate or PVF. +3. **Job errors:** For example, the worker thread panicked for some + indeterminate reason, which may or may not be independent of the candidate or + PVF. +4. **Internal errors:** See "Internal Errors" section. In this case, after the + retry we abstain from voting. ### Preparation timeouts @@ -62,10 +69,16 @@ more than the CPU time. ### Internal errors +An internal, or local, error is one that we treat as independent of the PVF +and/or candidate, i.e. local to the running machine. If this happens, then we +will first retry the job and if the errors persists, then we simply do not vote. +This prevents slashes, since otherwise our vote may not agree with that of the +other validators. + In general, for errors not raising a dispute we have to be very careful. This is -only sound, if we either: +only sound, if either: -1. Ruled out that error in pre-checking. If something is not checked in +1. We ruled out that error in pre-checking. If something is not checked in pre-checking, even if independent of the candidate and PVF, we must raise a dispute. 2. We are 100% confident that it is a hardware/local issue: Like corrupted file, @@ -75,11 +88,11 @@ Reasoning: Otherwise it would be possible to register a PVF where candidates can not be checked, but we don't get a dispute - so nobody gets punished. Second, we end up with a finality stall that is not going to resolve! 
-There are some error conditions where we can't be sure whether the candidate is -really invalid or some internal glitch occurred, e.g. panics. Whenever we are -unsure, we can never treat an error as internal as we would abstain from voting. -So we will first retry the candidate, and if the issue persists we are forced to -vote invalid. +Note that any error from the job process we cannot treat as internal. The job +runs untrusted code and an attacker can therefore return arbitrary errors. If +they were to return errors that we treat as internal, they could make us abstain +from voting. Since we are unsure if such errors are legitimate, we will first +retry the candidate, and if the issue persists we are forced to vote invalid. ## Security @@ -119,6 +132,20 @@ So what are we actually worried about? Things that come to mind: 6. **Intercepting and manipulating packages** - Effect very similar to the above, hard to do without also being able to do 4 or 5. +### Job Processes + +As mentioned above, our architecture includes long-living **worker processes** +and one-off **job processes*. This separation is important so that the handling +of untrusted code can be limited to the job processes. A hijacked job process +can therefore not interfere with other jobs running in separate processes. + +Furthermore, if an unexpected execution error occurred in the worker and not the +job, we generally can be confident that it has nothing to do with the candidate, +so we can abstain from voting. On the other hand, a hijacked job can send back +erroneous responses for candidates, so we know that we should not abstain from +voting on such errors from jobs. Otherwise, an attacker could trigger a finality +stall. (See "Internal Errors" section above.) + ### Restricting file-system access A basic security mechanism is to make sure that any process directly interfacing diff --git a/polkadot/scripts/list-syscalls/execute-worker-syscalls b/polkadot/scripts/list-syscalls/execute-worker-syscalls index 4a7a6618129..349af783cf1 100644 --- a/polkadot/scripts/list-syscalls/execute-worker-syscalls +++ b/polkadot/scripts/list-syscalls/execute-worker-syscalls @@ -16,6 +16,7 @@ 16 (ioctl) 19 (readv) 20 (writev) +22 (pipe) 24 (sched_yield) 25 (mremap) 28 (madvise) @@ -25,7 +26,9 @@ 45 (recvfrom) 46 (sendmsg) 56 (clone) +57 (fork) 60 (exit) +61 (wait4) 62 (kill) 72 (fcntl) 79 (getcwd) @@ -36,6 +39,7 @@ 89 (readlink) 96 (gettimeofday) 97 (getrlimit) +98 (getrusage) 99 (sysinfo) 102 (getuid) 110 (getppid) @@ -47,6 +51,7 @@ 158 (arch_prctl) 165 (mount) 166 (umount2) +186 (gettid) 200 (tkill) 202 (futex) 204 (sched_getaffinity) @@ -60,6 +65,7 @@ 263 (unlinkat) 272 (unshare) 273 (set_robust_list) +293 (pipe2) 302 (prlimit64) 318 (getrandom) 319 (memfd_create) diff --git a/polkadot/scripts/list-syscalls/prepare-worker-syscalls b/polkadot/scripts/list-syscalls/prepare-worker-syscalls index cab58e06692..05281b61591 100644 --- a/polkadot/scripts/list-syscalls/prepare-worker-syscalls +++ b/polkadot/scripts/list-syscalls/prepare-worker-syscalls @@ -16,6 +16,7 @@ 16 (ioctl) 19 (readv) 20 (writev) +22 (pipe) 24 (sched_yield) 25 (mremap) 28 (madvise) @@ -25,7 +26,9 @@ 45 (recvfrom) 46 (sendmsg) 56 (clone) +57 (fork) 60 (exit) +61 (wait4) 62 (kill) 72 (fcntl) 79 (getcwd) @@ -48,6 +51,7 @@ 158 (arch_prctl) 165 (mount) 166 (umount2) +186 (gettid) 200 (tkill) 202 (futex) 203 (sched_setaffinity) @@ -62,6 +66,7 @@ 263 (unlinkat) 272 (unshare) 273 (set_robust_list) +293 (pipe2) 302 (prlimit64) 309 (getcpu) 318 (getrandom) -- GitLab From 
31c38cea3def97044c328f3a9717eeb471c261fe Mon Sep 17 00:00:00 2001 From: Kristian Sosnin <48099298+slumber@users.noreply.github.com> Date: Tue, 14 Nov 2023 22:14:59 +0400 Subject: [PATCH 29/74] statement-distribution: support inactive local validator in grid (#1571) Fixes #1437 Co-authored-by: Sophia Gold --- .../statement-distribution/src/v2/mod.rs | 335 +++++++++++------- .../src/v2/tests/cluster.rs | 70 ++-- .../src/v2/tests/grid.rs | 241 ++++++++++--- .../src/v2/tests/mod.rs | 51 ++- .../src/v2/tests/requests.rs | 76 ++-- 5 files changed, 514 insertions(+), 259 deletions(-) diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs index 6f39a5c504d..406f1130590 100644 --- a/polkadot/node/network/statement-distribution/src/v2/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -142,8 +142,27 @@ struct PerRelayParentState { session: SessionIndex, } +impl PerRelayParentState { + fn active_validator_state(&self) -> Option<&ActiveValidatorState> { + self.local_validator.as_ref().and_then(|local| local.active.as_ref()) + } + + fn active_validator_state_mut(&mut self) -> Option<&mut ActiveValidatorState> { + self.local_validator.as_mut().and_then(|local| local.active.as_mut()) + } +} + // per-relay-parent local validator state. struct LocalValidatorState { + // the grid-level communication at this relay-parent. + grid_tracker: GridTracker, + // additional fields in case local node is an active validator. + active: Option, + // local index actually exists in case node is inactive validator, however, + // it's not needed outside of `build_session_topology`, where it's known. +} + +struct ActiveValidatorState { // The index of the validator. index: ValidatorIndex, // our validator group @@ -152,8 +171,14 @@ struct LocalValidatorState { assignment: Option, // the 'direct-in-group' communication at this relay-parent. cluster_tracker: ClusterTracker, - // the grid-level communication at this relay-parent. - grid_tracker: GridTracker, +} + +#[derive(Debug, Copy, Clone)] +enum LocalValidatorIndex { + // Local node is an active validator. + Active(ValidatorIndex), + // Local node is not in active validator set. + Inactive, } #[derive(Debug)] @@ -164,7 +189,7 @@ struct PerSessionState { // is only `None` in the time between seeing a session and // getting the topology from the gossip-support subsystem grid_view: Option, - local_validator: Option, + local_validator: Option, } impl PerSessionState { @@ -178,15 +203,10 @@ impl PerSessionState { let local_validator = polkadot_node_subsystem_util::signing_key_and_index( session_info.validators.iter(), keystore, - ); + ) + .map(|(_, index)| LocalValidatorIndex::Active(index)); - PerSessionState { - session_info, - groups, - authority_lookup, - grid_view: None, - local_validator: local_validator.map(|(_key, index)| index), - } + PerSessionState { session_info, groups, authority_lookup, grid_view: None, local_validator } } fn supply_topology( @@ -204,6 +224,16 @@ impl PerSessionState { ); self.grid_view = Some(grid_view); + if local_index.is_some() { + self.local_validator.get_or_insert(LocalValidatorIndex::Inactive); + } + } + + /// Returns `true` if local is neither active or inactive validator node. + /// + /// `false` is also returned if session topology is not known yet. 
+ fn is_not_validator(&self) -> bool { + self.grid_view.is_some() && self.local_validator.is_none() } } @@ -554,13 +584,17 @@ pub(crate) async fn handle_active_leaves_update( .expect("either existed or just inserted; qed"); let local_validator = per_session.local_validator.and_then(|v| { - find_local_validator_state( - v, - &per_session.groups, - &availability_cores, - &group_rotation_info, - seconding_limit, - ) + if let LocalValidatorIndex::Active(idx) = v { + find_active_validator_state( + idx, + &per_session.groups, + &availability_cores, + &group_rotation_info, + seconding_limit, + ) + } else { + Some(LocalValidatorState { grid_tracker: GridTracker::default(), active: None }) + } }); state.per_relay_parent.insert( @@ -607,7 +641,7 @@ pub(crate) async fn handle_active_leaves_update( Ok(()) } -fn find_local_validator_state( +fn find_active_validator_state( validator_index: ValidatorIndex, groups: &Groups, availability_cores: &[CoreState], @@ -628,11 +662,13 @@ fn find_local_validator_state( let group_validators = groups.get(our_group)?.to_owned(); Some(LocalValidatorState { - index: validator_index, - group: our_group, - assignment: para, - cluster_tracker: ClusterTracker::new(group_validators, seconding_limit) - .expect("group is non-empty because we are in it; qed"), + active: Some(ActiveValidatorState { + index: validator_index, + group: our_group, + assignment: para, + cluster_tracker: ClusterTracker::new(group_validators, seconding_limit) + .expect("group is non-empty because we are in it; qed"), + }), grid_tracker: GridTracker::default(), }) } @@ -725,13 +761,17 @@ async fn send_peer_messages_for_relay_parent( for validator_id in find_validator_ids(peer_data.iter_known_discovery_ids(), |a| { per_session_state.authority_lookup.get(a) }) { - if let Some(local_validator_state) = relay_parent_state.local_validator.as_mut() { + if let Some(active) = relay_parent_state + .local_validator + .as_mut() + .and_then(|local| local.active.as_mut()) + { send_pending_cluster_statements( ctx, relay_parent, &(peer, peer_data.protocol_version), validator_id, - &mut local_validator_state.cluster_tracker, + &mut active.cluster_tracker, &state.candidates, &relay_parent_state.statement_store, ) @@ -1009,7 +1049,7 @@ pub(crate) async fn share_local_statement( }; let (local_index, local_assignment, local_group) = - match per_relay_parent.local_validator.as_ref() { + match per_relay_parent.active_validator_state() { None => return Err(JfyiError::InvalidShare), Some(l) => (l.index, l.assignment, l.group), }; @@ -1086,7 +1126,7 @@ pub(crate) async fn share_local_statement( } { - let l = per_relay_parent.local_validator.as_mut().expect("checked above; qed"); + let l = per_relay_parent.active_validator_state_mut().expect("checked above; qed"); l.cluster_tracker.note_issued(local_index, compact_statement.payload().clone()); } @@ -1173,31 +1213,41 @@ async fn circulate_statement( // We're not meant to circulate statements in the cluster until we have the confirmed // candidate. - let cluster_relevant = Some(local_validator.group) == statement_group; - let cluster_targets = if is_confirmed && cluster_relevant { - Some( - local_validator - .cluster_tracker - .targets() - .iter() - .filter(|&&v| { - local_validator + // + // Cluster is only relevant if local node is an active validator. 
+ let (cluster_relevant, cluster_targets, all_cluster_targets) = local_validator + .active + .as_mut() + .map(|active| { + let cluster_relevant = Some(active.group) == statement_group; + let cluster_targets = if is_confirmed && cluster_relevant { + Some( + active .cluster_tracker - .can_send(v, originator, compact_statement.clone()) - .is_ok() - }) - .filter(|&v| v != &local_validator.index) - .map(|v| (*v, DirectTargetKind::Cluster)), - ) - } else { - None - }; + .targets() + .iter() + .filter(|&&v| { + active + .cluster_tracker + .can_send(v, originator, compact_statement.clone()) + .is_ok() + }) + .filter(|&v| v != &active.index) + .map(|v| (*v, DirectTargetKind::Cluster)), + ) + } else { + None + }; + let all_cluster_targets = active.cluster_tracker.targets(); + (cluster_relevant, cluster_targets, all_cluster_targets) + }) + .unwrap_or((false, None, &[])); let grid_targets = local_validator .grid_tracker .direct_statement_targets(&per_session.groups, originator, &compact_statement) .into_iter() - .filter(|v| !cluster_relevant || !local_validator.cluster_tracker.targets().contains(v)) + .filter(|v| !cluster_relevant || !all_cluster_targets.contains(v)) .map(|v| (v, DirectTargetKind::Grid)); let targets = cluster_targets @@ -1229,18 +1279,17 @@ async fn circulate_statement( match kind { DirectTargetKind::Cluster => { + let active = local_validator + .active + .as_mut() + .expect("cluster target means local is active validator; qed"); + // At this point, all peers in the cluster should 'know' // the candidate, so we don't expect for this to fail. - if let Ok(()) = local_validator.cluster_tracker.can_send( - target, - originator, - compact_statement.clone(), - ) { - local_validator.cluster_tracker.note_sent( - target, - originator, - compact_statement.clone(), - ); + if let Ok(()) = + active.cluster_tracker.can_send(target, originator, compact_statement.clone()) + { + active.cluster_tracker.note_sent(target, originator, compact_statement.clone()); statement_to_peers.push(peer_id); } }, @@ -1387,7 +1436,9 @@ async fn handle_incoming_statement( None => { // we shouldn't be receiving statements unless we're a validator // this session. - modify_reputation(reputation, ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await; + if per_session.is_not_validator() { + modify_reputation(reputation, ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await; + } return }, Some(l) => l, @@ -1402,73 +1453,81 @@ async fn handle_incoming_statement( }, }; - let cluster_sender_index = { + let (active, cluster_sender_index) = { // This block of code only returns `Some` when both the originator and // the sending peer are in the cluster. 
+ let active = local_validator.active.as_mut(); - let allowed_senders = local_validator - .cluster_tracker - .senders_for_originator(statement.unchecked_validator_index()); + let allowed_senders = active + .as_ref() + .map(|active| { + active + .cluster_tracker + .senders_for_originator(statement.unchecked_validator_index()) + }) + .unwrap_or_default(); - allowed_senders + let idx = allowed_senders .iter() .filter_map(|i| session_info.discovery_keys.get(i.0 as usize).map(|ad| (*i, ad))) .filter(|(_, ad)| peer_state.is_authority(ad)) .map(|(i, _)| i) - .next() - }; - - let checked_statement = if let Some(cluster_sender_index) = cluster_sender_index { - match handle_cluster_statement( - relay_parent, - &mut local_validator.cluster_tracker, - per_relay_parent.session, - &per_session.session_info, - statement, - cluster_sender_index, - ) { - Ok(Some(s)) => s, - Ok(None) => return, - Err(rep) => { - modify_reputation(reputation, ctx.sender(), peer, rep).await; - return - }, - } - } else { - let grid_sender_index = local_validator - .grid_tracker - .direct_statement_providers( - &per_session.groups, - statement.unchecked_validator_index(), - statement.unchecked_payload(), - ) - .into_iter() - .filter_map(|i| session_info.discovery_keys.get(i.0 as usize).map(|ad| (i, ad))) - .filter(|(_, ad)| peer_state.is_authority(ad)) - .map(|(i, _)| i) .next(); + (active, idx) + }; - if let Some(grid_sender_index) = grid_sender_index { - match handle_grid_statement( + let checked_statement = + if let Some((active, cluster_sender_index)) = active.zip(cluster_sender_index) { + match handle_cluster_statement( relay_parent, - &mut local_validator.grid_tracker, + &mut active.cluster_tracker, per_relay_parent.session, - &per_session, + &per_session.session_info, statement, - grid_sender_index, + cluster_sender_index, ) { - Ok(s) => s, + Ok(Some(s)) => s, + Ok(None) => return, Err(rep) => { modify_reputation(reputation, ctx.sender(), peer, rep).await; return }, } } else { - // Not a cluster or grid peer. - modify_reputation(reputation, ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await; - return - } - }; + let grid_sender_index = local_validator + .grid_tracker + .direct_statement_providers( + &per_session.groups, + statement.unchecked_validator_index(), + statement.unchecked_payload(), + ) + .into_iter() + .filter_map(|i| session_info.discovery_keys.get(i.0 as usize).map(|ad| (i, ad))) + .filter(|(_, ad)| peer_state.is_authority(ad)) + .map(|(i, _)| i) + .next(); + + if let Some(grid_sender_index) = grid_sender_index { + match handle_grid_statement( + relay_parent, + &mut local_validator.grid_tracker, + per_relay_parent.session, + &per_session, + statement, + grid_sender_index, + ) { + Ok(s) => s, + Err(rep) => { + modify_reputation(reputation, ctx.sender(), peer, rep).await; + return + }, + } + } else { + // Not a cluster or grid peer. 
+ modify_reputation(reputation, ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await; + return + } + }; let statement = checked_statement.payload().clone(); let originator_index = checked_statement.validator_index(); @@ -1536,7 +1595,7 @@ async fn handle_incoming_statement( local_validator.grid_tracker.learned_fresh_statement( &per_session.groups, session_topology, - local_validator.index, + originator_index, &statement, ); } @@ -1834,7 +1893,7 @@ async fn provide_candidate_to_grid( gum::debug!( target: LOG_TARGET, ?candidate_hash, - local_validator = ?local_validator.index, + local_validator = ?per_session.local_validator, n_peers = manifest_peers_v2.len(), "Sending manifest to v2 peers" ); @@ -1853,7 +1912,7 @@ async fn provide_candidate_to_grid( gum::debug!( target: LOG_TARGET, ?candidate_hash, - local_validator = ?local_validator.index, + local_validator = ?per_session.local_validator, n_peers = manifest_peers_vstaging.len(), "Sending manifest to vstaging peers" ); @@ -1874,7 +1933,7 @@ async fn provide_candidate_to_grid( gum::debug!( target: LOG_TARGET, ?candidate_hash, - local_validator = ?local_validator.index, + local_validator = ?per_session.local_validator, n_peers = ack_peers_v2.len(), "Sending acknowledgement to v2 peers" ); @@ -1893,7 +1952,7 @@ async fn provide_candidate_to_grid( gum::debug!( target: LOG_TARGET, ?candidate_hash, - local_validator = ?local_validator.index, + local_validator = ?per_session.local_validator, n_peers = ack_peers_vstaging.len(), "Sending acknowledgement to vstaging peers" ); @@ -2086,13 +2145,15 @@ async fn handle_incoming_manifest_common<'a, Context>( let local_validator = match relay_parent_state.local_validator.as_mut() { None => { - modify_reputation( - reputation, - ctx.sender(), - peer, - COST_UNEXPECTED_MANIFEST_MISSING_KNOWLEDGE, - ) - .await; + if per_session.is_not_validator() { + modify_reputation( + reputation, + ctx.sender(), + peer, + COST_UNEXPECTED_MANIFEST_MISSING_KNOWLEDGE, + ) + .await; + } return None }, Some(x) => x, @@ -2188,7 +2249,7 @@ async fn handle_incoming_manifest_common<'a, Context>( target: LOG_TARGET, ?candidate_hash, from = ?sender_index, - local_index = ?local_validator.index, + local_index = ?per_session.local_validator, ?manifest_kind, "immediate ack, known candidate" ); @@ -2593,7 +2654,7 @@ async fn send_cluster_candidate_statements( Some(s) => s, }; - let local_group = match relay_parent_state.local_validator.as_mut() { + let local_group = match relay_parent_state.active_validator_state_mut() { None => return, Some(v) => v.group, }; @@ -2680,11 +2741,10 @@ pub(crate) async fn dispatch_requests(ctx: &mut Context, state: &mut St }) { // For cluster members, they haven't advertised any statements in particular, // but have surely sent us some. - if local_validator - .cluster_tracker - .knows_candidate(validator_id, identifier.candidate_hash) - { - return Some(StatementFilter::blank(local_validator.cluster_tracker.targets().len())) + if let Some(active) = local_validator.active.as_ref() { + if active.cluster_tracker.knows_candidate(validator_id, identifier.candidate_hash) { + return Some(StatementFilter::blank(active.cluster_tracker.targets().len())) + } } let filter = local_validator @@ -2715,7 +2775,11 @@ pub(crate) async fn dispatch_requests(ctx: &mut Context, state: &mut St } // don't require a backing threshold for cluster candidates. 
- let require_backing = relay_parent_state.local_validator.as_ref()?.group != group_index; + let local_validator = relay_parent_state.local_validator.as_ref()?; + let require_backing = local_validator + .active + .as_ref() + .map_or(true, |active| active.group != group_index); Some(RequestProperties { unwanted_mask, @@ -2973,7 +3037,11 @@ pub(crate) fn answer_request(state: &mut State, message: ResponderMessage) { for v in find_validator_ids(peer_data.iter_known_discovery_ids(), |a| { per_session.authority_lookup.get(a) }) { - if local_validator.cluster_tracker.can_request(v, *candidate_hash) { + if local_validator + .active + .as_ref() + .map_or(false, |active| active.cluster_tracker.can_request(v, *candidate_hash)) + { validator_id = Some(v); is_cluster = true; break @@ -3015,11 +3083,16 @@ pub(crate) fn answer_request(state: &mut State, message: ResponderMessage) { // Update bookkeeping about which statements peers have received. for statement in &statements { if is_cluster { - local_validator.cluster_tracker.note_sent( - validator_id, - statement.unchecked_validator_index(), - statement.unchecked_payload().clone(), - ); + local_validator + .active + .as_mut() + .expect("cluster peer means local is active validator; qed") + .cluster_tracker + .note_sent( + validator_id, + statement.unchecked_validator_index(), + statement.unchecked_payload().clone(), + ); } else { local_validator.grid_tracker.sent_or_received_direct_statement( &per_session.groups, diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs b/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs index 80dec1d75ab..a9f5b537b32 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs @@ -23,7 +23,7 @@ fn share_seconded_circulated_to_cluster() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -34,7 +34,8 @@ fn share_seconded_circulated_to_cluster() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -52,7 +53,7 @@ fn share_seconded_circulated_to_cluster() { // peer B is in group, has no relay parent in view. // peer C is not in group, has relay parent in view. { - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); connect_peer( &mut overseer, @@ -130,7 +131,7 @@ fn cluster_valid_statement_before_seconded_ignored() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -139,12 +140,13 @@ fn cluster_valid_statement_before_seconded_ignored() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); let candidate_hash = CandidateHash(Hash::repeat_byte(42)); let test_leaf = state.make_dummy_leaf(relay_parent); // peer A is in group, has relay parent in view. 
- let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; connect_peer( &mut overseer, @@ -197,7 +199,7 @@ fn cluster_statement_bad_signature() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -206,12 +208,13 @@ fn cluster_statement_bad_signature() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); let candidate_hash = CandidateHash(Hash::repeat_byte(42)); let test_leaf = state.make_dummy_leaf(relay_parent); // peer A is in group, has relay parent in view. - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; let v_b = other_group_validators[1]; @@ -277,7 +280,7 @@ fn useful_cluster_statement_from_non_cluster_peer_rejected() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -286,13 +289,13 @@ fn useful_cluster_statement_from_non_cluster_peer_rejected() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); let candidate_hash = CandidateHash(Hash::repeat_byte(42)); let test_leaf = state.make_dummy_leaf(relay_parent); // peer A is not in group, has relay parent in view. - let not_our_group = - if local_validator.group_index.0 == 0 { GroupIndex(1) } else { GroupIndex(0) }; + let not_our_group = if local_group_index.0 == 0 { GroupIndex(1) } else { GroupIndex(0) }; let that_group_validators = state.group_validators(not_our_group, false); let v_non = that_group_validators[0]; @@ -346,7 +349,7 @@ fn statement_from_non_cluster_originator_unexpected() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -355,12 +358,13 @@ fn statement_from_non_cluster_originator_unexpected() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); let candidate_hash = CandidateHash(Hash::repeat_byte(42)); let test_leaf = state.make_dummy_leaf(relay_parent); // peer A is not in group, has relay parent in view. 
- let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; connect_peer(&mut overseer, peer_a.clone(), None).await; @@ -408,7 +412,7 @@ fn seconded_statement_leads_to_request() { let config = TestConfig { validator_count: 20, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -417,7 +421,8 @@ fn seconded_statement_leads_to_request() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -432,7 +437,7 @@ fn seconded_statement_leads_to_request() { let candidate_hash = candidate.hash(); // peer A is in group, has relay parent in view. - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; connect_peer( @@ -503,7 +508,7 @@ fn cluster_statements_shared_seconded_first() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -512,7 +517,8 @@ fn cluster_statements_shared_seconded_first() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -528,7 +534,7 @@ fn cluster_statements_shared_seconded_first() { // peer A is in group, no relay parent in view. { - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); connect_peer( &mut overseer, @@ -624,7 +630,7 @@ fn cluster_accounts_for_implicit_view() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -634,7 +640,8 @@ fn cluster_accounts_for_implicit_view() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -651,7 +658,7 @@ fn cluster_accounts_for_implicit_view() { // peer A is in group, has relay parent in view. // peer B is in group, has no relay parent in view. 
{ - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); connect_peer( &mut overseer, @@ -775,7 +782,7 @@ fn cluster_messages_imported_after_confirmed_candidate_importable_check() { let config = TestConfig { validator_count: 20, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -784,7 +791,8 @@ fn cluster_messages_imported_after_confirmed_candidate_importable_check() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -799,7 +807,7 @@ fn cluster_messages_imported_after_confirmed_candidate_importable_check() { let candidate_hash = candidate.hash(); // peer A is in group, has relay parent in view. - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; { connect_peer( @@ -907,7 +915,7 @@ fn cluster_messages_imported_after_new_leaf_importable_check() { let config = TestConfig { validator_count: 20, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -916,7 +924,8 @@ fn cluster_messages_imported_after_new_leaf_importable_check() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -931,7 +940,7 @@ fn cluster_messages_imported_after_new_leaf_importable_check() { let candidate_hash = candidate.hash(); // peer A is in group, has relay parent in view. - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; { connect_peer( @@ -1048,7 +1057,7 @@ fn ensure_seconding_limit_is_respected() { let config = TestConfig { validator_count: 20, group_size: 4, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: Some(AsyncBackingParams { max_candidate_depth: 1, allowed_ancestry_len: 3, @@ -1060,7 +1069,8 @@ fn ensure_seconding_limit_is_respected() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1092,7 +1102,7 @@ fn ensure_seconding_limit_is_respected() { let candidate_hash_2 = candidate_2.hash(); let candidate_hash_3 = candidate_3.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; // peers A,B,C are in group, have relay parent in view. 
diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs index 5b1dabfc8a0..9802db06082 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs @@ -29,7 +29,7 @@ fn backed_candidate_leads_to_advertisement() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -41,7 +41,8 @@ fn backed_candidate_leads_to_advertisement() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -55,9 +56,9 @@ fn backed_candidate_leads_to_advertisement() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let target_group_validators = - state.group_validators((local_validator.group_index.0 + 1).into(), true); + state.group_validators((local_group_index.0 + 1).into(), true); let v_a = other_group_validators[0]; let v_b = other_group_validators[1]; let v_c = target_group_validators[0]; @@ -219,7 +220,7 @@ fn backed_candidate_leads_to_advertisement() { assert_eq!(manifest, BackedCandidateManifest { relay_parent, candidate_hash, - group_index: local_validator.group_index, + group_index: local_group_index, para_id: local_para, parent_head_data_hash: pvd.parent_head.hash(), statement_knowledge: StatementFilter { @@ -244,7 +245,7 @@ fn received_advertisement_before_confirmation_leads_to_request() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -256,9 +257,9 @@ fn received_advertisement_before_confirmation_leads_to_request() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -273,7 +274,7 @@ fn received_advertisement_before_confirmation_leads_to_request() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let target_group_validators = state.group_validators(other_group, true); let v_a = other_group_validators[0]; let v_b = other_group_validators[1]; @@ -424,7 +425,7 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -435,9 +436,9 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = 
local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -672,7 +673,7 @@ fn received_advertisement_after_confirmation_before_backing() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -683,9 +684,9 @@ fn received_advertisement_after_confirmation_before_backing() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -858,7 +859,7 @@ fn additional_statements_are_shared_after_manifest_exchange() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -869,9 +870,9 @@ fn additional_statements_are_shared_after_manifest_exchange() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1155,7 +1156,7 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1167,7 +1168,8 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1181,9 +1183,9 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let target_group_validators = - state.group_validators((local_validator.group_index.0 + 1).into(), true); + state.group_validators((local_group_index.0 + 1).into(), true); let v_a = other_group_validators[0]; let v_b = other_group_validators[1]; let v_c = target_group_validators[0]; @@ -1336,7 +1338,7 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() { let expected_manifest = BackedCandidateManifest { relay_parent, candidate_hash, - group_index: local_validator.group_index, + group_index: local_group_index, para_id: local_para, parent_head_data_hash: pvd.parent_head.hash(), statement_knowledge: StatementFilter { @@ -1377,7 +1379,7 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() { let config = 
TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1389,7 +1391,8 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1403,9 +1406,9 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let target_group_validators = - state.group_validators((local_validator.group_index.0 + 1).into(), true); + state.group_validators((local_group_index.0 + 1).into(), true); let v_a = other_group_validators[0]; let v_b = other_group_validators[1]; let v_c = target_group_validators[0]; @@ -1567,7 +1570,7 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() { assert_eq!(manifest, BackedCandidateManifest { relay_parent, candidate_hash, - group_index: local_validator.group_index, + group_index: local_group_index, para_id: local_para, parent_head_data_hash: pvd.parent_head.hash(), statement_knowledge: StatementFilter { @@ -1599,7 +1602,7 @@ fn grid_statements_imported_to_backing() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1610,9 +1613,9 @@ fn grid_statements_imported_to_backing() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1803,7 +1806,7 @@ fn advertisements_rejected_from_incorrect_peers() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1815,9 +1818,9 @@ fn advertisements_rejected_from_incorrect_peers() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1832,12 +1835,12 @@ fn advertisements_rejected_from_incorrect_peers() { ); let candidate_hash = candidate.hash(); - let target_group_validators = state.group_validators(local_validator.group_index, true); - let other_group_validators = state.group_validators(other_group, true); - let v_a = target_group_validators[0]; - let v_b = target_group_validators[1]; - let v_c = other_group_validators[0]; - let v_d = other_group_validators[1]; + let other_group_validators = state.group_validators(local_group_index, true); + let target_group_validators = 
state.group_validators(other_group, true); + let v_a = other_group_validators[0]; + let v_b = other_group_validators[1]; + let v_c = target_group_validators[0]; + let v_d = target_group_validators[1]; // peer A is in group, has relay parent in view. // peer B is in group, has no relay parent in view. @@ -1948,7 +1951,7 @@ fn manifest_rejected_with_unknown_relay_parent() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1959,9 +1962,9 @@ fn manifest_rejected_with_unknown_relay_parent() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -2054,7 +2057,7 @@ fn manifest_rejected_when_not_a_validator() { let config = TestConfig { validator_count, group_size, - local_validator: false, + local_validator: LocalRole::None, async_backing_params: None, }; @@ -2156,7 +2159,7 @@ fn manifest_rejected_when_group_does_not_match_para() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -2166,9 +2169,9 @@ fn manifest_rejected_when_group_does_not_match_para() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); // Create a mismatch between group and para. 
let other_para = next_group_index(other_group, validator_count, group_size); let other_para = ParaId::from(other_para.0); @@ -2263,7 +2266,7 @@ fn peer_reported_for_advertisement_conflicting_with_confirmed_candidate() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -2274,9 +2277,9 @@ fn peer_reported_for_advertisement_conflicting_with_confirmed_candidate() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -2454,3 +2457,141 @@ fn peer_reported_for_advertisement_conflicting_with_confirmed_candidate() { overseer }); } + +#[test] +fn inactive_local_participates_in_grid() { + let validator_count = 11; + let group_size = 3; + let config = TestConfig { + validator_count, + group_size, + local_validator: LocalRole::InactiveValidator, + async_backing_params: None, + }; + + let dummy_relay_parent = Hash::repeat_byte(2); + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + assert_eq!(local_validator.validator_index.0, validator_count as u32); + + let group_idx = GroupIndex::from(0); + let para = ParaId::from(0); + + // Dummy leaf is needed to update topology. + let dummy_leaf = state.make_dummy_leaf(Hash::repeat_byte(2)); + let test_leaf = state.make_dummy_leaf(relay_parent); + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + para, + test_leaf.para_data(para).head_data.clone(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + let first_group = state.group_validators(group_idx, true); + let v_a = first_group.last().unwrap().clone(); + let v_b = first_group.first().unwrap().clone(); + + { + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(v_a)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, &dummy_leaf, &state, true).await; + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(dummy_relay_parent), + false, + ) + .await; + + // Send gossip topology. + send_new_topology(&mut overseer, state.make_dummy_topology()).await; + activate_leaf(&mut overseer, &test_leaf, &state, false).await; + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + // Receive an advertisement from A. 
+ let manifest = BackedCandidateManifest { + relay_parent, + candidate_hash, + group_index: group_idx, + para_id: para, + parent_head_data_hash: pvd.parent_head.hash(), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + }, + }; + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), + ) + .await; + + let statements = vec![ + state + .sign_statement( + v_a, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(), + state + .sign_statement( + v_b, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(), + ]; + // Inactive node requests this candidate. + handle_sent_request( + &mut overseer, + peer_a, + candidate_hash, + StatementFilter::blank(group_size), + candidate.clone(), + pvd.clone(), + statements, + ) + .await; + + for _ in 0..2 { + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) + if p == peer_a && r == BENEFIT_VALID_STATEMENT.into() => { } + ); + } + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) + if p == peer_a && r == BENEFIT_VALID_RESPONSE.into() => { } + ); + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + + overseer + }); +} diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs index 4150377a0c6..4e626977524 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs @@ -61,19 +61,30 @@ const DEFAULT_ASYNC_BACKING_PARAMETERS: AsyncBackingParams = // Some deterministic genesis hash for req/res protocol names const GENESIS_HASH: Hash = Hash::repeat_byte(0xff); +#[derive(Debug, Copy, Clone)] +enum LocalRole { + /// Active validator. + Validator, + /// Authority, not in active validator set. + InactiveValidator, + /// Not a validator. + None, +} + struct TestConfig { + // number of active validators. validator_count: usize, // how many validators to place in each group. group_size: usize, // whether the local node should be a validator - local_validator: bool, + local_validator: LocalRole, async_backing_params: Option, } #[derive(Debug, Clone)] struct TestLocalValidator { validator_index: ValidatorIndex, - group_index: GroupIndex, + group_index: Option, } struct TestState { @@ -99,7 +110,7 @@ impl TestState { let mut assignment_keys = Vec::new(); let mut validator_groups = Vec::new(); - let local_validator_pos = if config.local_validator { + let local_validator_pos = if let LocalRole::Validator = config.local_validator { // ensure local validator is always in a full group. 
Some(rng.gen_range(0..config.validator_count).saturating_sub(config.group_size - 1)) } else { @@ -128,13 +139,19 @@ impl TestState { } } - let local = if let Some(local_pos) = local_validator_pos { - Some(TestLocalValidator { + let local = match (config.local_validator, local_validator_pos) { + (LocalRole::Validator, Some(local_pos)) => Some(TestLocalValidator { validator_index: ValidatorIndex(local_pos as _), - group_index: GroupIndex((local_pos / config.group_size) as _), - }) - } else { - None + group_index: Some(GroupIndex((local_pos / config.group_size) as _)), + }), + (LocalRole::InactiveValidator, None) => { + discovery_keys.push(AuthorityDiscoveryPair::generate().0.public()); + Some(TestLocalValidator { + validator_index: ValidatorIndex(config.validator_count as u32), + group_index: None, + }) + }, + _ => None, }; let validator_public = validator_pubkeys(&validators); @@ -181,15 +198,23 @@ impl TestState { fn make_dummy_topology(&self) -> NewGossipTopology { let validator_count = self.config.validator_count; + let is_local_inactive = matches!(self.config.local_validator, LocalRole::InactiveValidator); + + let mut indices: Vec = (0..validator_count).collect(); + if is_local_inactive { + indices.push(validator_count); + } + NewGossipTopology { session: 1, topology: SessionGridTopology::new( - (0..validator_count).collect(), - (0..validator_count) + indices.clone(), + indices + .into_iter() .map(|i| TopologyPeerInfo { peer_ids: Vec::new(), validator_index: ValidatorIndex(i as u32), - discovery_id: AuthorityDiscoveryPair::generate().0.public(), + discovery_id: self.session_info.discovery_keys[i].clone(), }) .collect(), ), @@ -276,7 +301,7 @@ fn test_harness>( test: impl FnOnce(TestState, VirtualOverseer) -> T, ) { let pool = sp_core::testing::TaskExecutor::new(); - let keystore = if config.local_validator { + let keystore = if let LocalRole::Validator = config.local_validator { test_helpers::mock::make_ferdie_keystore() } else { Arc::new(LocalKeystore::in_memory()) as KeystorePtr diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs index 4734d7a0f96..1eec8290fab 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs @@ -32,7 +32,7 @@ fn cluster_peer_allowed_to_send_incomplete_statements() { let config = TestConfig { validator_count: 20, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -43,7 +43,8 @@ fn cluster_peer_allowed_to_send_incomplete_statements() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -57,7 +58,7 @@ fn cluster_peer_allowed_to_send_incomplete_statements() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; let v_b = other_group_validators[1]; @@ -188,7 +189,7 @@ fn peer_reported_for_providing_statements_meant_to_be_masked_out() { let config = TestConfig { validator_count, group_size, - local_validator: true, + 
local_validator: LocalRole::Validator, async_backing_params: Some(AsyncBackingParams { // Makes `seconding_limit: 2` (easier to hit the limit). max_candidate_depth: 1, @@ -203,9 +204,9 @@ fn peer_reported_for_providing_statements_meant_to_be_masked_out() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -475,7 +476,7 @@ fn peer_reported_for_not_enough_statements() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -486,9 +487,9 @@ fn peer_reported_for_not_enough_statements() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -670,7 +671,7 @@ fn peer_reported_for_duplicate_statements() { let config = TestConfig { validator_count: 20, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -681,7 +682,8 @@ fn peer_reported_for_duplicate_statements() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -695,7 +697,7 @@ fn peer_reported_for_duplicate_statements() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; let v_b = other_group_validators[1]; @@ -830,7 +832,7 @@ fn peer_reported_for_providing_statements_with_invalid_signatures() { let config = TestConfig { validator_count: 20, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -841,7 +843,8 @@ fn peer_reported_for_providing_statements_with_invalid_signatures() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -855,8 +858,8 @@ fn peer_reported_for_providing_statements_with_invalid_signatures() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); - state.group_validators((local_validator.group_index.0 + 1).into(), true); + let other_group_validators = state.group_validators(local_group_index, true); + state.group_validators((local_group_index.0 + 1).into(), 
true); let v_a = other_group_validators[0]; let v_b = other_group_validators[1]; @@ -968,7 +971,7 @@ fn peer_reported_for_providing_statements_with_wrong_validator_id() { let config = TestConfig { validator_count: 20, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -979,7 +982,8 @@ fn peer_reported_for_providing_statements_with_wrong_validator_id() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -993,9 +997,8 @@ fn peer_reported_for_providing_statements_with_wrong_validator_id() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); - let next_group_validators = - state.group_validators((local_validator.group_index.0 + 1).into(), true); + let other_group_validators = state.group_validators(local_group_index, true); + let next_group_validators = state.group_validators((local_group_index.0 + 1).into(), true); let v_a = other_group_validators[0]; let v_c = next_group_validators[0]; @@ -1105,7 +1108,7 @@ fn local_node_sanity_checks_incoming_requests() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1117,7 +1120,8 @@ fn local_node_sanity_checks_incoming_requests() { test_harness(config, |mut state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1135,7 +1139,7 @@ fn local_node_sanity_checks_incoming_requests() { // peer B is in group, has no relay parent in view. // peer C is not in group, has relay parent in view. { - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); connect_peer( &mut overseer, @@ -1311,7 +1315,7 @@ fn local_node_checks_that_peer_can_request_before_responding() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1321,7 +1325,8 @@ fn local_node_checks_that_peer_can_request_before_responding() { test_harness(config, |mut state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1336,7 +1341,7 @@ fn local_node_checks_that_peer_can_request_before_responding() { let candidate_hash = candidate.hash(); // Peers A and B are in group and have relay parent in view. 
- let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); connect_peer( &mut overseer, @@ -1515,7 +1520,7 @@ fn local_node_respects_statement_mask() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1527,7 +1532,8 @@ fn local_node_respects_statement_mask() { test_harness(config, |mut state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1541,9 +1547,9 @@ fn local_node_respects_statement_mask() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let target_group_validators = - state.group_validators((local_validator.group_index.0 + 1).into(), true); + state.group_validators((local_group_index.0 + 1).into(), true); let v_a = other_group_validators[0]; let v_b = other_group_validators[1]; let v_c = target_group_validators[0]; @@ -1707,7 +1713,7 @@ fn local_node_respects_statement_mask() { assert_eq!(manifest, BackedCandidateManifest { relay_parent, candidate_hash, - group_index: local_validator.group_index, + group_index: local_group_index, para_id: local_para, parent_head_data_hash: pvd.parent_head.hash(), statement_knowledge: StatementFilter { @@ -1761,7 +1767,7 @@ fn should_delay_before_retrying_dropped_requests() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1772,9 +1778,9 @@ fn should_delay_before_retrying_dropped_requests() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); -- GitLab From fc12f435e3796d33018aab93a9e8cd851d4431d5 Mon Sep 17 00:00:00 2001 From: Alin Dima Date: Tue, 14 Nov 2023 20:48:32 +0200 Subject: [PATCH 30/74] add NodeFeatures field to HostConfiguration and runtime API (#2177) Adds a `NodeFeatures` bitfield value to the runtime `HostConfiguration`, with the purpose of coordinating the enabling of node-side features, such as: https://github.com/paritytech/polkadot-sdk/issues/628 and https://github.com/paritytech/polkadot-sdk/issues/598. These are features that require all validators enable them at the same time, assuming all/most nodes have upgraded their node versions. This PR doesn't add any feature yet. These are coming in future PRs. Also adds a runtime API for querying the state of the client features and an extrinsic for setting/unsetting a feature by its index in the bitfield. 
Note: originally part of: https://github.com/paritytech/polkadot-sdk/pull/1644, but posted as standalone to be reused by other PRs until the initial PR is merged --- Cargo.lock | 1 + .../src/blockchain_rpc_client.rs | 5 + .../src/rpc_client.rs | 12 +- .../emulated/chains/relays/rococo/src/lib.rs | 2 +- .../emulated/chains/relays/westend/src/lib.rs | 2 +- polkadot/node/core/runtime-api/src/cache.rs | 20 +- polkadot/node/core/runtime-api/src/lib.rs | 23 +- polkadot/node/core/runtime-api/src/tests.rs | 16 +- polkadot/node/subsystem-types/src/messages.rs | 17 +- .../subsystem-types/src/runtime_client.rs | 22 +- .../node/subsystem-util/src/runtime/mod.rs | 33 ++- polkadot/primitives/Cargo.toml | 2 +- polkadot/primitives/src/runtime_api.rs | 15 +- polkadot/primitives/src/vstaging/mod.rs | 5 + .../runtime/parachains/src/configuration.rs | 53 +++- .../src/configuration/benchmarking.rs | 2 + .../parachains/src/configuration/migration.rs | 1 + .../src/configuration/migration/v10.rs | 277 ++++++++++++++++++ .../src/configuration/migration/v9.rs | 107 ++++++- .../parachains/src/configuration/tests.rs | 8 + .../src/runtime_api_impl/vstaging.rs | 9 +- polkadot/runtime/rococo/src/lib.rs | 17 +- .../runtime_parachains_configuration.rs | 86 +++--- polkadot/runtime/westend/src/lib.rs | 19 +- .../runtime_parachains_configuration.rs | 86 +++--- 25 files changed, 709 insertions(+), 131 deletions(-) create mode 100644 polkadot/runtime/parachains/src/configuration/migration/v10.rs diff --git a/Cargo.lock b/Cargo.lock index d8308086822..196f580e0d5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1527,6 +1527,7 @@ checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" dependencies = [ "funty", "radium", + "serde", "tap", "wyz", ] diff --git a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs index 1e78df71154..a473b3bced0 100644 --- a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs +++ b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs @@ -24,6 +24,7 @@ use polkadot_overseer::RuntimeApiSubsystemClient; use polkadot_primitives::{ async_backing::{AsyncBackingParams, BackingState}, slashing, + vstaging::NodeFeatures, }; use sc_authority_discovery::{AuthorityDiscovery, Error as AuthorityDiscoveryError}; use sp_api::{ApiError, RuntimeApiInfo}; @@ -364,6 +365,10 @@ impl RuntimeApiSubsystemClient for BlockChainRpcClient { ) -> Result, ApiError> { Ok(self.rpc_client.parachain_host_para_backing_state(at, para_id).await?) } + + async fn node_features(&self, at: Hash) -> Result { + Ok(self.rpc_client.parachain_host_node_features(at).await?) 
+ } } #[async_trait::async_trait] diff --git a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs index 90af334e133..cc993c6ff9f 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -31,7 +31,9 @@ use parity_scale_codec::{Decode, Encode}; use cumulus_primitives_core::{ relay_chain::{ async_backing::{AsyncBackingParams, BackingState}, - slashing, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, + slashing, + vstaging::NodeFeatures, + BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash as RelayHash, Header as RelayHeader, InboundHrmpMessage, OccupiedCoreAssumption, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, @@ -597,6 +599,14 @@ impl RelayChainRpcClient { .await } + pub async fn parachain_host_node_features( + &self, + at: RelayHash, + ) -> Result { + self.call_remote_runtime_function("ParachainHost_node_features", at, None::<()>) + .await + } + pub async fn parachain_host_disabled_validators( &self, at: RelayHash, diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs index f806f4a5d9e..7ace9614710 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs @@ -24,7 +24,7 @@ use emulated_integration_tests_common::{ // Rococo declaration decl_test_relay_chains! { - #[api_version(8)] + #[api_version(9)] pub struct Rococo { genesis = genesis::genesis(), on_init = (), diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs index d4ba1b6cfe7..2ba47250d56 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs @@ -24,7 +24,7 @@ use emulated_integration_tests_common::{ // Westend declaration decl_test_relay_chains! 
{ - #[api_version(8)] + #[api_version(9)] pub struct Westend { genesis = genesis::genesis(), on_init = (), diff --git a/polkadot/node/core/runtime-api/src/cache.rs b/polkadot/node/core/runtime-api/src/cache.rs index 69eea22b23b..8a7a3dc08b8 100644 --- a/polkadot/node/core/runtime-api/src/cache.rs +++ b/polkadot/node/core/runtime-api/src/cache.rs @@ -20,7 +20,7 @@ use schnellru::{ByLength, LruMap}; use sp_consensus_babe::Epoch; use polkadot_primitives::{ - async_backing, slashing, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, + async_backing, slashing, vstaging, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, @@ -67,6 +67,7 @@ pub(crate) struct RequestResultCache { disabled_validators: LruMap>, para_backing_state: LruMap<(Hash, ParaId), Option>, async_backing_params: LruMap, + node_features: LruMap, } impl Default for RequestResultCache { @@ -100,6 +101,7 @@ impl Default for RequestResultCache { disabled_validators: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), para_backing_state: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), async_backing_params: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), + node_features: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), } } } @@ -446,6 +448,21 @@ impl RequestResultCache { self.minimum_backing_votes.insert(session_index, minimum_backing_votes); } + pub(crate) fn node_features( + &mut self, + session_index: SessionIndex, + ) -> Option<&vstaging::NodeFeatures> { + self.node_features.get(&session_index).map(|f| &*f) + } + + pub(crate) fn cache_node_features( + &mut self, + session_index: SessionIndex, + features: vstaging::NodeFeatures, + ) { + self.node_features.insert(session_index, features); + } + pub(crate) fn disabled_validators( &mut self, relay_parent: &Hash, @@ -540,4 +557,5 @@ pub(crate) enum RequestResult { DisabledValidators(Hash, Vec), ParaBackingState(Hash, ParaId, Option), AsyncBackingParams(Hash, async_backing::AsyncBackingParams), + NodeFeatures(SessionIndex, vstaging::NodeFeatures), } diff --git a/polkadot/node/core/runtime-api/src/lib.rs b/polkadot/node/core/runtime-api/src/lib.rs index bdcca08b10d..8689355c413 100644 --- a/polkadot/node/core/runtime-api/src/lib.rs +++ b/polkadot/node/core/runtime-api/src/lib.rs @@ -173,6 +173,8 @@ where .cache_para_backing_state((relay_parent, para_id), constraints), AsyncBackingParams(relay_parent, params) => self.requests_cache.cache_async_backing_params(relay_parent, params), + NodeFeatures(session_index, params) => + self.requests_cache.cache_node_features(session_index, params), } } @@ -313,6 +315,15 @@ where Some(Request::MinimumBackingVotes(index, sender)) } }, + Request::NodeFeatures(index, sender) => { + if let Some(value) = self.requests_cache.node_features(index) { + self.metrics.on_cached_request(); + let _ = sender.send(Ok(value.clone())); + None + } else { + Some(Request::NodeFeatures(index, sender)) + } + }, } } @@ -408,6 +419,9 @@ where macro_rules! 
query { ($req_variant:ident, $api_name:ident ($($param:expr),*), ver = $version:expr, $sender:expr) => {{ + query!($req_variant, $api_name($($param),*), ver = $version, $sender, result = ( relay_parent $(, $param )* ) ) + }}; + ($req_variant:ident, $api_name:ident ($($param:expr),*), ver = $version:expr, $sender:expr, result = ( $($results:expr),* ) ) => {{ let sender = $sender; let version: u32 = $version; // enforce type for the version expression let runtime_version = client.api_version_parachain_host(relay_parent).await @@ -441,7 +455,7 @@ where metrics.on_request(res.is_ok()); let _ = sender.send(res.clone()); - res.ok().map(|res| RequestResult::$req_variant(relay_parent, $( $param, )* res)) + res.ok().map(|res| RequestResult::$req_variant($( $results, )* res)) }} } @@ -591,5 +605,12 @@ where sender ) }, + Request::NodeFeatures(index, sender) => query!( + NodeFeatures, + node_features(), + ver = Request::NODE_FEATURES_RUNTIME_REQUIREMENT, + sender, + result = (index) + ), } } diff --git a/polkadot/node/core/runtime-api/src/tests.rs b/polkadot/node/core/runtime-api/src/tests.rs index 979b3587d26..b939bffb0e7 100644 --- a/polkadot/node/core/runtime-api/src/tests.rs +++ b/polkadot/node/core/runtime-api/src/tests.rs @@ -20,12 +20,12 @@ use polkadot_node_primitives::{BabeAllowedSlots, BabeEpoch, BabeEpochConfigurati use polkadot_node_subsystem::SpawnGlue; use polkadot_node_subsystem_test_helpers::make_subsystem_context; use polkadot_primitives::{ - async_backing, slashing, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, - CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, - ExecutorParams, GroupRotationInfo, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, - OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, - SessionIndex, SessionInfo, Slot, ValidationCode, ValidationCodeHash, ValidatorId, - ValidatorIndex, ValidatorSignature, + async_backing, slashing, vstaging::NodeFeatures, AuthorityDiscoveryId, BlockNumber, + CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, + DisputeState, ExecutorParams, GroupRotationInfo, Id as ParaId, InboundDownwardMessage, + InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, + ScrapedOnChainVotes, SessionIndex, SessionInfo, Slot, ValidationCode, ValidationCodeHash, + ValidatorId, ValidatorIndex, ValidatorSignature, }; use sp_api::ApiError; use sp_core::testing::TaskExecutor; @@ -269,6 +269,10 @@ impl RuntimeApiSubsystemClient for MockSubsystemClient { todo!("Not required for tests") } + async fn node_features(&self, _: Hash) -> Result { + todo!("Not required for tests") + } + async fn disabled_validators(&self, _: Hash) -> Result, ApiError> { todo!("Not required for tests") } diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index 4ddffc6dc5e..43456daec30 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -42,12 +42,12 @@ use polkadot_node_primitives::{ ValidationResult, }; use polkadot_primitives::{ - async_backing, slashing, AuthorityDiscoveryId, BackedCandidate, BlockNumber, CandidateEvent, - CandidateHash, CandidateIndex, CandidateReceipt, CollatorId, CommittedCandidateReceipt, - CoreState, DisputeState, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, - Header as BlockHeader, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, - MultiDisputeStatementSet, 
OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, - PvfExecTimeoutKind, SessionIndex, SessionInfo, SignedAvailabilityBitfield, + async_backing, slashing, vstaging::NodeFeatures, AuthorityDiscoveryId, BackedCandidate, + BlockNumber, CandidateEvent, CandidateHash, CandidateIndex, CandidateReceipt, CollatorId, + CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupIndex, + GroupRotationInfo, Hash, Header as BlockHeader, Id as ParaId, InboundDownwardMessage, + InboundHrmpMessage, MultiDisputeStatementSet, OccupiedCoreAssumption, PersistedValidationData, + PvfCheckStatement, PvfExecTimeoutKind, SessionIndex, SessionInfo, SignedAvailabilityBitfield, SignedAvailabilityBitfields, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; @@ -718,6 +718,8 @@ pub enum RuntimeApiRequest { /// /// If it's not supported by the Runtime, the async backing is said to be disabled. AsyncBackingParams(RuntimeApiSender), + /// Get the node features. + NodeFeatures(SessionIndex, RuntimeApiSender), } impl RuntimeApiRequest { @@ -746,6 +748,9 @@ impl RuntimeApiRequest { /// `DisabledValidators` pub const DISABLED_VALIDATORS_RUNTIME_REQUIREMENT: u32 = 8; + + /// `Node features` + pub const NODE_FEATURES_RUNTIME_REQUIREMENT: u32 = 9; } /// A message to the Runtime API subsystem. diff --git a/polkadot/node/subsystem-types/src/runtime_client.rs b/polkadot/node/subsystem-types/src/runtime_client.rs index f7adcf9862b..8369fd215f4 100644 --- a/polkadot/node/subsystem-types/src/runtime_client.rs +++ b/polkadot/node/subsystem-types/src/runtime_client.rs @@ -16,12 +16,12 @@ use async_trait::async_trait; use polkadot_primitives::{ - async_backing, runtime_api::ParachainHost, slashing, Block, BlockNumber, CandidateCommitments, - CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, - ExecutorParams, GroupRotationInfo, Hash, Id, InboundDownwardMessage, InboundHrmpMessage, - OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, - SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, - ValidatorSignature, + async_backing, runtime_api::ParachainHost, slashing, vstaging, Block, BlockNumber, + CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, + DisputeState, ExecutorParams, GroupRotationInfo, Hash, Id, InboundDownwardMessage, + InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, + ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, + ValidatorId, ValidatorIndex, ValidatorSignature, }; use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sp_api::{ApiError, ApiExt, ProvideRuntimeApi}; @@ -257,8 +257,14 @@ pub trait RuntimeApiSubsystemClient { ) -> Result, ApiError>; // === v8 === + /// Gets the disabled validators at a specific block height async fn disabled_validators(&self, at: Hash) -> Result, ApiError>; + + // === v9 === + + /// Get the node features. + async fn node_features(&self, at: Hash) -> Result; } /// Default implementation of [`RuntimeApiSubsystemClient`] using the client. 
@@ -508,6 +514,10 @@ where self.client.runtime_api().async_backing_params(at) } + async fn node_features(&self, at: Hash) -> Result { + self.client.runtime_api().node_features(at) + } + async fn disabled_validators(&self, at: Hash) -> Result, ApiError> { self.client.runtime_api().disabled_validators(at) } diff --git a/polkadot/node/subsystem-util/src/runtime/mod.rs b/polkadot/node/subsystem-util/src/runtime/mod.rs index 8d7cef88a70..aada7a5d77a 100644 --- a/polkadot/node/subsystem-util/src/runtime/mod.rs +++ b/polkadot/node/subsystem-util/src/runtime/mod.rs @@ -30,8 +30,8 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_types::UnpinHandle; use polkadot_primitives::{ - slashing, AsyncBackingParams, CandidateEvent, CandidateHash, CoreState, EncodeAs, - ExecutorParams, GroupIndex, GroupRotationInfo, Hash, IndexedVec, OccupiedCore, + slashing, vstaging::NodeFeatures, AsyncBackingParams, CandidateEvent, CandidateHash, CoreState, + EncodeAs, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, IndexedVec, OccupiedCore, ScrapedOnChainVotes, SessionIndex, SessionInfo, Signed, SigningContext, UncheckedSigned, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, LEGACY_MIN_BACKING_VOTES, }; @@ -507,3 +507,32 @@ pub async fn request_min_backing_votes( min_backing_votes_res } } + +/// Request the node features enabled in the runtime. +/// Pass in the session index for caching purposes, as it should only change on session boundaries. +/// Prior to runtime API version 9, just return `None`. +pub async fn request_node_features( + parent: Hash, + session_index: SessionIndex, + sender: &mut impl overseer::SubsystemSender, +) -> Result> { + let res = recv_runtime( + request_from_runtime(parent, sender, |tx| { + RuntimeApiRequest::NodeFeatures(session_index, tx) + }) + .await, + ) + .await; + + if let Err(Error::RuntimeRequest(RuntimeApiError::NotSupported { .. })) = res { + gum::trace!( + target: LOG_TARGET, + ?parent, + "Querying the node features from the runtime is not supported by the current Runtime API", + ); + + Ok(None) + } else { + res.map(Some) + } +} diff --git a/polkadot/primitives/Cargo.toml b/polkadot/primitives/Cargo.toml index b318c2d4be7..316644a372d 100644 --- a/polkadot/primitives/Cargo.toml +++ b/polkadot/primitives/Cargo.toml @@ -7,7 +7,7 @@ license.workspace = true description = "Shared primitives used by Polkadot runtime" [dependencies] -bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } +bitvec = { version = "1.0.0", default-features = false, features = ["alloc", "serde"] } hex-literal = "0.4.1" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive", "serde"] } diff --git a/polkadot/primitives/src/runtime_api.rs b/polkadot/primitives/src/runtime_api.rs index 5ec897c8cbb..e4c1d590f45 100644 --- a/polkadot/primitives/src/runtime_api.rs +++ b/polkadot/primitives/src/runtime_api.rs @@ -114,10 +114,10 @@ //! separated from the stable primitives. 
use crate::{ - async_backing, slashing, AsyncBackingParams, BlockNumber, CandidateCommitments, CandidateEvent, - CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, - GroupRotationInfo, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, - ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, + async_backing, slashing, vstaging, AsyncBackingParams, BlockNumber, CandidateCommitments, + CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, + ExecutorParams, GroupRotationInfo, OccupiedCoreAssumption, PersistedValidationData, + PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, ValidatorSignature, }; use parity_scale_codec::{Decode, Encode}; @@ -264,5 +264,12 @@ sp_api::decl_runtime_apis! { /// Returns a list of all disabled validators at the given block. #[api_version(8)] fn disabled_validators() -> Vec; + + /***** Added in v9 *****/ + + /// Get node features. + /// This is a staging method! Do not use on production runtimes! + #[api_version(9)] + fn node_features() -> vstaging::NodeFeatures; } } diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs index 1429b0c326a..083e0f42d56 100644 --- a/polkadot/primitives/src/vstaging/mod.rs +++ b/polkadot/primitives/src/vstaging/mod.rs @@ -17,3 +17,8 @@ //! Staging Primitives. // Put any primitives used by staging APIs functions here + +use bitvec::vec::BitVec; + +/// Bit indices in the `HostConfiguration.node_features` that correspond to different node features. +pub type NodeFeatures = BitVec; diff --git a/polkadot/runtime/parachains/src/configuration.rs b/polkadot/runtime/parachains/src/configuration.rs index d85c267496f..bff9cc34b4f 100644 --- a/polkadot/runtime/parachains/src/configuration.rs +++ b/polkadot/runtime/parachains/src/configuration.rs @@ -26,8 +26,8 @@ use polkadot_parachain_primitives::primitives::{ MAX_HORIZONTAL_MESSAGE_NUM, MAX_UPWARD_MESSAGE_NUM, }; use primitives::{ - AsyncBackingParams, Balance, ExecutorParamError, ExecutorParams, SessionIndex, - LEGACY_MIN_BACKING_VOTES, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, + vstaging::NodeFeatures, AsyncBackingParams, Balance, ExecutorParamError, ExecutorParams, + SessionIndex, LEGACY_MIN_BACKING_VOTES, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, }; use sp_runtime::{traits::Zero, Perbill}; @@ -261,6 +261,8 @@ pub struct HostConfiguration { /// The minimum number of valid backing statements required to consider a parachain candidate /// backable. pub minimum_backing_votes: u32, + /// Node features enablement. + pub node_features: NodeFeatures, } impl> Default for HostConfiguration { @@ -312,6 +314,7 @@ impl> Default for HostConfiguration Weight; fn set_config_with_executor_params() -> Weight; fn set_config_with_perbill() -> Weight; + fn set_node_feature() -> Weight; } pub struct TestWeightInfo; @@ -488,6 +492,9 @@ impl WeightInfo for TestWeightInfo { fn set_config_with_perbill() -> Weight { Weight::MAX } + fn set_node_feature() -> Weight { + Weight::MAX + } } #[frame_support::pallet] @@ -496,18 +503,19 @@ pub mod pallet { /// The current storage version. 
/// - /// v0-v1: - /// v1-v2: - /// v2-v3: - /// v3-v4: - /// v4-v5: - /// + - /// + - /// v5-v6: (remove UMP dispatch queue) - /// v6-v7: - /// v7-v8: - /// v8-v9: - const STORAGE_VERSION: StorageVersion = StorageVersion::new(9); + /// v0-v1: + /// v1-v2: + /// v2-v3: + /// v3-v4: + /// v4-v5: + /// + + /// + + /// v5-v6: (remove UMP dispatch queue) + /// v6-v7: + /// v7-v8: + /// v8-v9: + /// v9-v10: + const STORAGE_VERSION: StorageVersion = StorageVersion::new(10); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] @@ -1195,6 +1203,23 @@ pub mod pallet { config.minimum_backing_votes = new; }) } + /// Set/Unset a node feature. + #[pallet::call_index(53)] + #[pallet::weight(( + T::WeightInfo::set_node_feature(), + DispatchClass::Operational + ))] + pub fn set_node_feature(origin: OriginFor, index: u8, value: bool) -> DispatchResult { + ensure_root(origin)?; + + Self::schedule_config_update(|config| { + let index = usize::from(index); + if config.node_features.len() <= index { + config.node_features.resize(index + 1, false); + } + config.node_features.set(index, value); + }) + } } #[pallet::hooks] diff --git a/polkadot/runtime/parachains/src/configuration/benchmarking.rs b/polkadot/runtime/parachains/src/configuration/benchmarking.rs index d9d11ab56e4..508e0579a09 100644 --- a/polkadot/runtime/parachains/src/configuration/benchmarking.rs +++ b/polkadot/runtime/parachains/src/configuration/benchmarking.rs @@ -49,6 +49,8 @@ benchmarks! { set_config_with_perbill {}: set_on_demand_fee_variability(RawOrigin::Root, Perbill::from_percent(100)) + set_node_feature{}: set_node_feature(RawOrigin::Root, 255, true) + impl_benchmark_test_suite!( Pallet, crate::mock::new_test_ext(Default::default()), diff --git a/polkadot/runtime/parachains/src/configuration/migration.rs b/polkadot/runtime/parachains/src/configuration/migration.rs index 26f8a85b496..db323d3aad9 100644 --- a/polkadot/runtime/parachains/src/configuration/migration.rs +++ b/polkadot/runtime/parachains/src/configuration/migration.rs @@ -16,6 +16,7 @@ //! A module that is responsible for migration of storage. +pub mod v10; pub mod v6; pub mod v7; pub mod v8; diff --git a/polkadot/runtime/parachains/src/configuration/migration/v10.rs b/polkadot/runtime/parachains/src/configuration/migration/v10.rs new file mode 100644 index 00000000000..3c934082dc1 --- /dev/null +++ b/polkadot/runtime/parachains/src/configuration/migration/v10.rs @@ -0,0 +1,277 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! A module that is responsible for migration of storage. 
+ +use crate::configuration::{self, Config, Pallet}; +use frame_support::{pallet_prelude::*, traits::Defensive, weights::Weight}; +use frame_system::pallet_prelude::BlockNumberFor; +use primitives::{vstaging::NodeFeatures, SessionIndex}; +use sp_std::vec::Vec; + +use frame_support::traits::OnRuntimeUpgrade; + +use super::v9::V9HostConfiguration; + +type V10HostConfiguration = configuration::HostConfiguration; + +mod v9 { + use super::*; + + #[frame_support::storage_alias] + pub(crate) type ActiveConfig = + StorageValue, V9HostConfiguration>, OptionQuery>; + + #[frame_support::storage_alias] + pub(crate) type PendingConfigs = StorageValue< + Pallet, + Vec<(SessionIndex, V9HostConfiguration>)>, + OptionQuery, + >; +} + +mod v10 { + use super::*; + + #[frame_support::storage_alias] + pub(crate) type ActiveConfig = + StorageValue, V10HostConfiguration>, OptionQuery>; + + #[frame_support::storage_alias] + pub(crate) type PendingConfigs = StorageValue< + Pallet, + Vec<(SessionIndex, V10HostConfiguration>)>, + OptionQuery, + >; +} + +pub struct VersionUncheckedMigrateToV10(sp_std::marker::PhantomData); +impl OnRuntimeUpgrade for VersionUncheckedMigrateToV10 { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + log::trace!(target: crate::configuration::LOG_TARGET, "Running pre_upgrade() for HostConfiguration MigrateToV10"); + Ok(Vec::new()) + } + + fn on_runtime_upgrade() -> Weight { + migrate_to_v10::() + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + log::trace!(target: crate::configuration::LOG_TARGET, "Running post_upgrade() for HostConfiguration MigrateToV10"); + ensure!( + Pallet::::on_chain_storage_version() >= StorageVersion::new(10), + "Storage version should be >= 10 after the migration" + ); + + Ok(()) + } +} + +pub type MigrateToV10 = frame_support::migrations::VersionedMigration< + 9, + 10, + VersionUncheckedMigrateToV10, + Pallet, + ::DbWeight, +>; + +// Unusual formatting is justified: +// - make it easier to verify that fields assign what they supposed to assign. +// - this code is transient and will be removed after all migrations are done. +// - this code is important enough to optimize for legibility sacrificing consistency. 
+#[rustfmt::skip] +fn translate(pre: V9HostConfiguration>) -> V10HostConfiguration> { + V10HostConfiguration { + max_code_size : pre.max_code_size, + max_head_data_size : pre.max_head_data_size, + max_upward_queue_count : pre.max_upward_queue_count, + max_upward_queue_size : pre.max_upward_queue_size, + max_upward_message_size : pre.max_upward_message_size, + max_upward_message_num_per_candidate : pre.max_upward_message_num_per_candidate, + hrmp_max_message_num_per_candidate : pre.hrmp_max_message_num_per_candidate, + validation_upgrade_cooldown : pre.validation_upgrade_cooldown, + validation_upgrade_delay : pre.validation_upgrade_delay, + max_pov_size : pre.max_pov_size, + max_downward_message_size : pre.max_downward_message_size, + hrmp_sender_deposit : pre.hrmp_sender_deposit, + hrmp_recipient_deposit : pre.hrmp_recipient_deposit, + hrmp_channel_max_capacity : pre.hrmp_channel_max_capacity, + hrmp_channel_max_total_size : pre.hrmp_channel_max_total_size, + hrmp_max_parachain_inbound_channels : pre.hrmp_max_parachain_inbound_channels, + hrmp_max_parachain_outbound_channels : pre.hrmp_max_parachain_outbound_channels, + hrmp_channel_max_message_size : pre.hrmp_channel_max_message_size, + code_retention_period : pre.code_retention_period, + on_demand_cores : pre.on_demand_cores, + on_demand_retries : pre.on_demand_retries, + group_rotation_frequency : pre.group_rotation_frequency, + paras_availability_period : pre.paras_availability_period, + scheduling_lookahead : pre.scheduling_lookahead, + max_validators_per_core : pre.max_validators_per_core, + max_validators : pre.max_validators, + dispute_period : pre.dispute_period, + dispute_post_conclusion_acceptance_period: pre.dispute_post_conclusion_acceptance_period, + no_show_slots : pre.no_show_slots, + n_delay_tranches : pre.n_delay_tranches, + zeroth_delay_tranche_width : pre.zeroth_delay_tranche_width, + needed_approvals : pre.needed_approvals, + relay_vrf_modulo_samples : pre.relay_vrf_modulo_samples, + pvf_voting_ttl : pre.pvf_voting_ttl, + minimum_validation_upgrade_delay : pre.minimum_validation_upgrade_delay, + async_backing_params : pre.async_backing_params, + executor_params : pre.executor_params, + on_demand_queue_max_size : pre.on_demand_queue_max_size, + on_demand_base_fee : pre.on_demand_base_fee, + on_demand_fee_variability : pre.on_demand_fee_variability, + on_demand_target_queue_utilization : pre.on_demand_target_queue_utilization, + on_demand_ttl : pre.on_demand_ttl, + minimum_backing_votes : pre.minimum_backing_votes, + node_features : NodeFeatures::EMPTY + } +} + +fn migrate_to_v10() -> Weight { + let v9 = v9::ActiveConfig::::get() + .defensive_proof("Could not decode old config") + .unwrap_or_default(); + let v10 = translate::(v9); + v10::ActiveConfig::::set(Some(v10)); + + // Allowed to be empty. + let pending_v9 = v9::PendingConfigs::::get().unwrap_or_default(); + let mut pending_v10 = Vec::new(); + + for (session, v9) in pending_v9.into_iter() { + let v10 = translate::(v9); + pending_v10.push((session, v10)); + } + v10::PendingConfigs::::set(Some(pending_v10.clone())); + + let num_configs = (pending_v10.len() + 1) as u64; + T::DbWeight::get().reads_writes(num_configs, num_configs) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::mock::{new_test_ext, Test}; + use primitives::LEGACY_MIN_BACKING_VOTES; + + #[test] + fn v10_deserialized_from_actual_data() { + // Example how to get new `raw_config`: + // We'll obtain the raw_config at a specified a block + // Steps: + // 1. 
Go to Polkadot.js -> Developer -> Chain state -> Storage: https://polkadot.js.org/apps/#/chainstate + // 2. Set these parameters: + // 2.1. selected state query: configuration; activeConfig(): + // PolkadotRuntimeParachainsConfigurationHostConfiguration + // 2.2. blockhash to query at: + // 0xf89d3ab5312c5f70d396dc59612f0aa65806c798346f9db4b35278baed2e0e53 (the hash of + // the block) + // 2.3. Note the value of encoded storage key -> + // 0x06de3d8a54d27e44a9d5ce189618f22db4b49d95320d9021994c850f25b8e385 for the + // referenced block. + // 2.4. You'll also need the decoded values to update the test. + // 3. Go to Polkadot.js -> Developer -> Chain state -> Raw storage + // 3.1 Enter the encoded storage key and you get the raw config. + + // This exceeds the maximal line width length, but that's fine, since this is not code and + // doesn't need to be read and also leaving it as one line allows to easily copy it. + let raw_config = + hex_literal::hex![" + 0000300000800000080000000000100000c8000005000000050000000200000002000000000000000000000000005000000010000400000000000000000000000000000000000000000000000000000000000000000000000800000000200000040000000000100000b004000000000000000000001027000080b2e60e80c3c90180969800000000000000000000000000050000001400000004000000010000000101000000000600000064000000020000001900000000000000020000000200000002000000050000000200000000" + ]; + + let v10 = + V10HostConfiguration::::decode(&mut &raw_config[..]).unwrap(); + + // We check only a sample of the values here. If we missed any fields or messed up data + // types that would skew all the fields coming after. + assert_eq!(v10.max_code_size, 3_145_728); + assert_eq!(v10.validation_upgrade_cooldown, 2); + assert_eq!(v10.max_pov_size, 5_242_880); + assert_eq!(v10.hrmp_channel_max_message_size, 1_048_576); + assert_eq!(v10.n_delay_tranches, 25); + assert_eq!(v10.minimum_validation_upgrade_delay, 5); + assert_eq!(v10.group_rotation_frequency, 20); + assert_eq!(v10.on_demand_cores, 0); + assert_eq!(v10.on_demand_base_fee, 10_000_000); + assert_eq!(v10.minimum_backing_votes, LEGACY_MIN_BACKING_VOTES); + assert_eq!(v10.node_features, NodeFeatures::EMPTY); + } + + // Test that `migrate_to_v10`` correctly applies the `translate` function to current and pending + // configs. + #[test] + fn test_migrate_to_v10() { + // Host configuration has lots of fields. However, in this migration we only add one + // field. The most important part to check are a couple of the last fields. We also pick + // extra fields to check arbitrarily, e.g. depending on their position (i.e. the middle) and + // also their type. + // + // We specify only the picked fields and the rest should be provided by the `Default` + // implementation. That implementation is copied over between the two types and should work + // fine. + let v9 = V9HostConfiguration:: { + needed_approvals: 69, + paras_availability_period: 55, + hrmp_recipient_deposit: 1337, + max_pov_size: 1111, + minimum_validation_upgrade_delay: 20, + ..Default::default() + }; + + let mut pending_configs = Vec::new(); + pending_configs.push((100, v9.clone())); + pending_configs.push((300, v9.clone())); + + new_test_ext(Default::default()).execute_with(|| { + // Implant the v9 version in the state. 
+ v9::ActiveConfig::::set(Some(v9.clone())); + v9::PendingConfigs::::set(Some(pending_configs)); + + migrate_to_v10::(); + + let v10 = translate::(v9); + let mut configs_to_check = v10::PendingConfigs::::get().unwrap(); + configs_to_check.push((0, v10::ActiveConfig::::get().unwrap())); + + for (_, config) in configs_to_check { + assert_eq!(config, v10); + assert_eq!(config.node_features, NodeFeatures::EMPTY); + } + }); + } + + // Test that migration doesn't panic in case there're no pending configurations upgrades in + // pallet's storage. + #[test] + fn test_migrate_to_v10_no_pending() { + let v9 = V9HostConfiguration::::default(); + + new_test_ext(Default::default()).execute_with(|| { + // Implant the v9 version in the state. + v9::ActiveConfig::::set(Some(v9)); + // Ensure there're no pending configs. + v9::PendingConfigs::::set(None); + + // Shouldn't fail. + migrate_to_v10::(); + }); + } +} diff --git a/polkadot/runtime/parachains/src/configuration/migration/v9.rs b/polkadot/runtime/parachains/src/configuration/migration/v9.rs index e37f0b9b0e3..ca4bbd9dace 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v9.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v9.rs @@ -23,13 +23,116 @@ use frame_support::{ weights::Weight, }; use frame_system::pallet_prelude::BlockNumberFor; -use primitives::{SessionIndex, LEGACY_MIN_BACKING_VOTES}; +use primitives::{ + AsyncBackingParams, Balance, ExecutorParams, SessionIndex, LEGACY_MIN_BACKING_VOTES, + ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, +}; +use sp_runtime::Perbill; use sp_std::vec::Vec; use frame_support::traits::OnRuntimeUpgrade; use super::v8::V8HostConfiguration; -type V9HostConfiguration = configuration::HostConfiguration; +/// All configuration of the runtime with respect to paras. 
+#[derive(Clone, Encode, Decode, Debug)] +pub struct V9HostConfiguration { + pub max_code_size: u32, + pub max_head_data_size: u32, + pub max_upward_queue_count: u32, + pub max_upward_queue_size: u32, + pub max_upward_message_size: u32, + pub max_upward_message_num_per_candidate: u32, + pub hrmp_max_message_num_per_candidate: u32, + pub validation_upgrade_cooldown: BlockNumber, + pub validation_upgrade_delay: BlockNumber, + pub async_backing_params: AsyncBackingParams, + pub max_pov_size: u32, + pub max_downward_message_size: u32, + pub hrmp_max_parachain_outbound_channels: u32, + pub hrmp_sender_deposit: Balance, + pub hrmp_recipient_deposit: Balance, + pub hrmp_channel_max_capacity: u32, + pub hrmp_channel_max_total_size: u32, + pub hrmp_max_parachain_inbound_channels: u32, + pub hrmp_channel_max_message_size: u32, + pub executor_params: ExecutorParams, + pub code_retention_period: BlockNumber, + pub on_demand_cores: u32, + pub on_demand_retries: u32, + pub on_demand_queue_max_size: u32, + pub on_demand_target_queue_utilization: Perbill, + pub on_demand_fee_variability: Perbill, + pub on_demand_base_fee: Balance, + pub on_demand_ttl: BlockNumber, + pub group_rotation_frequency: BlockNumber, + pub paras_availability_period: BlockNumber, + pub scheduling_lookahead: u32, + pub max_validators_per_core: Option, + pub max_validators: Option, + pub dispute_period: SessionIndex, + pub dispute_post_conclusion_acceptance_period: BlockNumber, + pub no_show_slots: u32, + pub n_delay_tranches: u32, + pub zeroth_delay_tranche_width: u32, + pub needed_approvals: u32, + pub relay_vrf_modulo_samples: u32, + pub pvf_voting_ttl: SessionIndex, + pub minimum_validation_upgrade_delay: BlockNumber, + pub minimum_backing_votes: u32, +} + +impl> Default for V9HostConfiguration { + fn default() -> Self { + Self { + async_backing_params: AsyncBackingParams { + max_candidate_depth: 0, + allowed_ancestry_len: 0, + }, + group_rotation_frequency: 1u32.into(), + paras_availability_period: 1u32.into(), + no_show_slots: 1u32.into(), + validation_upgrade_cooldown: Default::default(), + validation_upgrade_delay: 2u32.into(), + code_retention_period: Default::default(), + max_code_size: Default::default(), + max_pov_size: Default::default(), + max_head_data_size: Default::default(), + on_demand_cores: Default::default(), + on_demand_retries: Default::default(), + scheduling_lookahead: 1, + max_validators_per_core: Default::default(), + max_validators: None, + dispute_period: 6, + dispute_post_conclusion_acceptance_period: 100.into(), + n_delay_tranches: Default::default(), + zeroth_delay_tranche_width: Default::default(), + needed_approvals: Default::default(), + relay_vrf_modulo_samples: Default::default(), + max_upward_queue_count: Default::default(), + max_upward_queue_size: Default::default(), + max_downward_message_size: Default::default(), + max_upward_message_size: Default::default(), + max_upward_message_num_per_candidate: Default::default(), + hrmp_sender_deposit: Default::default(), + hrmp_recipient_deposit: Default::default(), + hrmp_channel_max_capacity: Default::default(), + hrmp_channel_max_total_size: Default::default(), + hrmp_max_parachain_inbound_channels: Default::default(), + hrmp_channel_max_message_size: Default::default(), + hrmp_max_parachain_outbound_channels: Default::default(), + hrmp_max_message_num_per_candidate: Default::default(), + pvf_voting_ttl: 2u32.into(), + minimum_validation_upgrade_delay: 2.into(), + executor_params: Default::default(), + on_demand_queue_max_size: 
ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, + on_demand_base_fee: 10_000_000u128, + on_demand_fee_variability: Perbill::from_percent(3), + on_demand_target_queue_utilization: Perbill::from_percent(25), + on_demand_ttl: 5u32.into(), + minimum_backing_votes: LEGACY_MIN_BACKING_VOTES, + } + } +} mod v8 { use super::*; diff --git a/polkadot/runtime/parachains/src/configuration/tests.rs b/polkadot/runtime/parachains/src/configuration/tests.rs index ea39628c958..b62a45355e1 100644 --- a/polkadot/runtime/parachains/src/configuration/tests.rs +++ b/polkadot/runtime/parachains/src/configuration/tests.rs @@ -16,6 +16,7 @@ use super::*; use crate::mock::{new_test_ext, Configuration, ParasShared, RuntimeOrigin, Test}; +use bitvec::{bitvec, prelude::Lsb0}; use frame_support::{assert_err, assert_noop, assert_ok}; fn on_new_session(session_index: SessionIndex) -> (HostConfiguration, HostConfiguration) { @@ -318,6 +319,7 @@ fn setting_pending_config_members() { on_demand_target_queue_utilization: Perbill::from_percent(25), on_demand_ttl: 5u32, minimum_backing_votes: 5, + node_features: bitvec![u8, Lsb0; 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1], }; Configuration::set_validation_upgrade_cooldown( @@ -473,6 +475,12 @@ fn setting_pending_config_members() { new_config.minimum_backing_votes, ) .unwrap(); + Configuration::set_node_feature(RuntimeOrigin::root(), 1, true).unwrap(); + Configuration::set_node_feature(RuntimeOrigin::root(), 1, true).unwrap(); + Configuration::set_node_feature(RuntimeOrigin::root(), 3, true).unwrap(); + Configuration::set_node_feature(RuntimeOrigin::root(), 10, true).unwrap(); + Configuration::set_node_feature(RuntimeOrigin::root(), 10, false).unwrap(); + Configuration::set_node_feature(RuntimeOrigin::root(), 11, true).unwrap(); assert_eq!(PendingConfigs::::get(), vec![(shared::SESSION_DELAY, new_config)],); }) diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs index 24a076f3a44..200fd57915f 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs @@ -16,8 +16,8 @@ //! Put implementations of functions from staging APIs here. -use crate::shared; -use primitives::ValidatorIndex; +use crate::{configuration, initializer, shared}; +use primitives::{vstaging::NodeFeatures, ValidatorIndex}; use sp_std::{collections::btree_map::BTreeMap, prelude::Vec}; /// Implementation for `DisabledValidators` @@ -42,3 +42,8 @@ where .filter_map(|v| reverse_index.get(v).cloned()) .collect() } + +/// Returns the current state of the node features. 
+pub fn node_features() -> NodeFeatures { + >::config().node_features +} diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 40ef22107a7..5a1e170862e 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -23,11 +23,12 @@ use pallet_nis::WithMaximumOf; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use primitives::{ - slashing, AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, - Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, Moment, Nonce, - OccupiedCoreAssumption, PersistedValidationData, ScrapedOnChainVotes, SessionInfo, Signature, - ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, PARACHAIN_KEY_TYPE_ID, + slashing, vstaging::NodeFeatures, AccountId, AccountIndex, Balance, BlockNumber, + CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, + ExecutorParams, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, + InboundHrmpMessage, Moment, Nonce, OccupiedCoreAssumption, PersistedValidationData, + ScrapedOnChainVotes, SessionInfo, Signature, ValidationCode, ValidationCodeHash, ValidatorId, + ValidatorIndex, PARACHAIN_KEY_TYPE_ID, }; use runtime_common::{ assigned_slots, auctions, claims, crowdloan, impl_runtime_weights, @@ -1499,6 +1500,7 @@ pub mod migrations { frame_support::migrations::RemovePallet::DbWeight>, pallet_grandpa::migrations::MigrateV4ToV5, + parachains_configuration::migration::v10::MigrateToV10, ); } @@ -1660,7 +1662,7 @@ sp_api::impl_runtime_apis! { } } - #[api_version(8)] + #[api_version(9)] impl primitives::runtime_api::ParachainHost for Runtime { fn validators() -> Vec { parachains_runtime_api_impl::validators::() @@ -1808,6 +1810,9 @@ sp_api::impl_runtime_apis! { parachains_staging_runtime_api_impl::disabled_validators::() } + fn node_features() -> NodeFeatures { + parachains_staging_runtime_api_impl::node_features::() + } } #[api_version(3)] diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs index 29f38765778..34541b83597 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `runtime_parachains::configuration` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-fljshgub-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -31,11 +31,11 @@ // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=runtime_parachains::configuration // --chain=rococo-dev -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -56,11 +56,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_block_number() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_051_000 picoseconds. - Weight::from_parts(9_496_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_793_000 picoseconds. + Weight::from_parts(8_192_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -72,11 +72,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_u32() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_104_000 picoseconds. - Weight::from_parts(9_403_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_819_000 picoseconds. + Weight::from_parts(8_004_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -88,11 +88,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_option_u32() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_112_000 picoseconds. - Weight::from_parts(9_495_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_760_000 picoseconds. + Weight::from_parts(8_174_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -114,11 +114,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_balance() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_011_000 picoseconds. - Weight::from_parts(9_460_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_814_000 picoseconds. 
+ Weight::from_parts(8_098_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -130,11 +130,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_executor_params() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_940_000 picoseconds. - Weight::from_parts(10_288_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 10_028_000 picoseconds. + Weight::from_parts(10_386_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -146,11 +146,27 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_perbill() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_192_000 picoseconds. - Weight::from_parts(9_595_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_867_000 picoseconds. + Weight::from_parts(8_191_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_node_feature() -> Weight { + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 10_158_000 picoseconds. 
+ Weight::from_parts(10_430_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index d80640c016f..d3862aff257 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -46,12 +46,12 @@ use pallet_session::historical as session_historical; use pallet_transaction_payment::{CurrencyAdapter, FeeDetails, RuntimeDispatchInfo}; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use primitives::{ - slashing, AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, - Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, Moment, Nonce, - OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, - SessionInfo, Signature, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, - ValidatorSignature, PARACHAIN_KEY_TYPE_ID, + slashing, vstaging::NodeFeatures, AccountId, AccountIndex, Balance, BlockNumber, + CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, + ExecutorParams, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, + InboundHrmpMessage, Moment, Nonce, OccupiedCoreAssumption, PersistedValidationData, + PvfCheckStatement, ScrapedOnChainVotes, SessionInfo, Signature, ValidationCode, + ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, PARACHAIN_KEY_TYPE_ID, }; use runtime_common::{ assigned_slots, auctions, crowdloan, @@ -1559,6 +1559,7 @@ pub mod migrations { pallet_referenda::migration::v1::MigrateV0ToV1, pallet_nomination_pools::migration::versioned_migrations::V6ToV7, pallet_grandpa::migrations::MigrateV4ToV5, + parachains_configuration::migration::v10::MigrateToV10, ); } @@ -1699,7 +1700,7 @@ sp_api::impl_runtime_apis! { } } - #[api_version(8)] + #[api_version(9)] impl primitives::runtime_api::ParachainHost for Runtime { fn validators() -> Vec { parachains_runtime_api_impl::validators::() @@ -1846,6 +1847,10 @@ sp_api::impl_runtime_apis! { fn disabled_validators() -> Vec { parachains_staging_runtime_api_impl::disabled_validators::() } + + fn node_features() -> NodeFeatures { + parachains_staging_runtime_api_impl::node_features::() + } } impl beefy_primitives::BeefyApi for Runtime { diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_configuration.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_configuration.rs index 585dc9058f2..3a4813b667c 100644 --- a/polkadot/runtime/westend/src/weights/runtime_parachains_configuration.rs +++ b/polkadot/runtime/westend/src/weights/runtime_parachains_configuration.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `runtime_parachains::configuration` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-fljshgub-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -31,11 +31,11 @@ // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=runtime_parachains::configuration // --chain=westend-dev -// --header=./file_header.txt -// --output=./runtime/westend/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -56,11 +56,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_block_number() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_616_000 picoseconds. - Weight::from_parts(9_961_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 8_065_000 picoseconds. + Weight::from_parts(8_389_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -72,11 +72,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_u32() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_587_000 picoseconds. - Weight::from_parts(9_964_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 8_038_000 picoseconds. + Weight::from_parts(8_463_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -88,11 +88,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_option_u32() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_650_000 picoseconds. - Weight::from_parts(9_960_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_843_000 picoseconds. + Weight::from_parts(8_216_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -114,11 +114,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_balance() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_545_000 picoseconds. - Weight::from_parts(9_845_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_969_000 picoseconds. 
+ Weight::from_parts(8_362_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -130,11 +130,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_executor_params() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 10_258_000 picoseconds. - Weight::from_parts(10_607_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 10_084_000 picoseconds. + Weight::from_parts(10_451_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -146,11 +146,27 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_perbill() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_502_000 picoseconds. - Weight::from_parts(9_902_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_948_000 picoseconds. + Weight::from_parts(8_268_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_node_feature() -> Weight { + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 10_257_000 picoseconds. + Weight::from_parts(10_584_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } -- GitLab From f517900a48ada81764e9d75d1e9d598ea4b84719 Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Tue, 14 Nov 2023 21:32:14 +0100 Subject: [PATCH 31/74] Contracts expose pallet-xcm (#1248) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR introduces: - XCM host functions `xcm_send`, `xcm_execute` - An Xcm trait into the config. that proxy these functions to to `pallet_xcm`, or disable their usage by using `()`. - A mock_network and xcm_test files to test the newly added xcm-related functions. 
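
Example usage (a sketch only, following the diffs below: `Runtime` is a placeholder runtime that also configures `pallet_xcm`, and the generic parameter on `pallet_xcm::Pallet` is written as `Self` here as an assumption — the concrete runtime may spell it differently):

```rust
impl pallet_contracts::Config for Runtime {
    // ... other associated types ...

    // Route the `xcm_send` / `xcm_execute` host functions through pallet-xcm:
    type Xcm = pallet_xcm::Pallet<Self>;

    // ...or disable their usage entirely:
    // type Xcm = ();
}
```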
--------- Co-authored-by: Keith Yeung Co-authored-by: Sasha Gryaznov Co-authored-by: command-bot <> Co-authored-by: Francisco Aguirre Co-authored-by: Alexander Theißen --- Cargo.lock | 43 +++ Cargo.toml | 1 + .../contracts-rococo/src/contracts.rs | 1 + polkadot/xcm/xcm-builder/src/controller.rs | 2 +- polkadot/xcm/xcm-builder/src/lib.rs | 2 +- substrate/bin/node/runtime/src/lib.rs | 1 + substrate/frame/contracts/Cargo.toml | 16 + .../contracts/fixtures/data/xcm_execute.wat | 52 +++ .../contracts/fixtures/data/xcm_send.wat | 59 +++ substrate/frame/contracts/fixtures/src/lib.rs | 2 + .../frame/contracts/mock-network/Cargo.toml | 91 +++++ .../frame/contracts/mock-network/src/lib.rs | 151 ++++++++ .../frame/contracts/mock-network/src/mocks.rs | 18 + .../mock-network/src/mocks/msg_queue.rs | 168 +++++++++ .../src/mocks/relay_message_queue.rs | 52 +++ .../contracts/mock-network/src/parachain.rs | 353 ++++++++++++++++++ .../src/parachain/contracts_config.rs | 98 +++++ .../contracts/mock-network/src/primitives.rs | 23 ++ .../contracts/mock-network/src/relay_chain.rs | 236 ++++++++++++ .../frame/contracts/mock-network/src/tests.rs | 239 ++++++++++++ substrate/frame/contracts/src/lib.rs | 10 + substrate/frame/contracts/src/tests.rs | 1 + substrate/frame/contracts/src/wasm/runtime.rs | 193 +++++++++- 23 files changed, 1797 insertions(+), 15 deletions(-) create mode 100644 substrate/frame/contracts/fixtures/data/xcm_execute.wat create mode 100644 substrate/frame/contracts/fixtures/data/xcm_send.wat create mode 100644 substrate/frame/contracts/mock-network/Cargo.toml create mode 100644 substrate/frame/contracts/mock-network/src/lib.rs create mode 100644 substrate/frame/contracts/mock-network/src/mocks.rs create mode 100644 substrate/frame/contracts/mock-network/src/mocks/msg_queue.rs create mode 100644 substrate/frame/contracts/mock-network/src/mocks/relay_message_queue.rs create mode 100644 substrate/frame/contracts/mock-network/src/parachain.rs create mode 100644 substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs create mode 100644 substrate/frame/contracts/mock-network/src/primitives.rs create mode 100644 substrate/frame/contracts/mock-network/src/relay_chain.rs create mode 100644 substrate/frame/contracts/mock-network/src/tests.rs diff --git a/Cargo.lock b/Cargo.lock index 196f580e0d5..9ae1bb905b2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9699,11 +9699,13 @@ dependencies = [ "frame-system", "impl-trait-for-tuples", "log", + "pallet-assets", "pallet-balances", "pallet-contracts-fixtures", "pallet-contracts-primitives", "pallet-contracts-proc-macro", "pallet-insecure-randomness-collective-flip", + "pallet-message-queue", "pallet-proxy", "pallet-timestamp", "pallet-utility", @@ -9720,6 +9722,9 @@ dependencies = [ "sp-keystore", "sp-runtime", "sp-std 8.0.0", + "sp-tracing 10.0.0", + "staging-xcm", + "staging-xcm-builder", "wasm-instrument 0.4.0", "wasmi", "wat", @@ -9734,6 +9739,44 @@ dependencies = [ "wat", ] +[[package]] +name = "pallet-contracts-mock-network" +version = "1.0.0" +dependencies = [ + "assert_matches", + "frame-support", + "frame-system", + "pallet-assets", + "pallet-balances", + "pallet-contracts", + "pallet-contracts-fixtures", + "pallet-contracts-primitives", + "pallet-contracts-proc-macro", + "pallet-insecure-randomness-collective-flip", + "pallet-message-queue", + "pallet-proxy", + "pallet-timestamp", + "pallet-utility", + "pallet-xcm", + "parity-scale-codec", + "polkadot-parachain-primitives", + "polkadot-primitives", + "polkadot-runtime-parachains", + 
"pretty_assertions", + "scale-info", + "sp-api", + "sp-core", + "sp-io", + "sp-keystore", + "sp-runtime", + "sp-std 8.0.0", + "sp-tracing 10.0.0", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "xcm-simulator", +] + [[package]] name = "pallet-contracts-primitives" version = "24.0.0" diff --git a/Cargo.toml b/Cargo.toml index 15a7d5c35bf..f9779681cae 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -293,6 +293,7 @@ members = [ "substrate/frame/contracts/fixtures", "substrate/frame/contracts/primitives", "substrate/frame/contracts/proc-macro", + "substrate/frame/contracts/mock-network", "substrate/frame/conviction-voting", "substrate/frame/core-fellowship", "substrate/frame/democracy", diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs index b86f797ee03..6c100deaa9e 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs @@ -72,4 +72,5 @@ impl Config for Runtime { type RuntimeHoldReason = RuntimeHoldReason; type Debug = (); type Environment = (); + type Xcm = pallet_xcm::Pallet; } diff --git a/polkadot/xcm/xcm-builder/src/controller.rs b/polkadot/xcm/xcm-builder/src/controller.rs index 0ee638b73e1..931d812eaaf 100644 --- a/polkadot/xcm/xcm-builder/src/controller.rs +++ b/polkadot/xcm/xcm-builder/src/controller.rs @@ -21,7 +21,7 @@ use frame_support::pallet_prelude::DispatchError; use sp_std::boxed::Box; use xcm::prelude::*; -use xcm_executor::traits::QueryHandler; +pub use xcm_executor::traits::QueryHandler; /// Umbrella trait for all Controller traits. pub trait Controller: diff --git a/polkadot/xcm/xcm-builder/src/lib.rs b/polkadot/xcm/xcm-builder/src/lib.rs index 35f95b85c89..455f17a5348 100644 --- a/polkadot/xcm/xcm-builder/src/lib.rs +++ b/polkadot/xcm/xcm-builder/src/lib.rs @@ -119,5 +119,5 @@ pub use pay::{FixedLocation, LocatableAssetId, PayAccountId32OnChainOverXcm, Pay mod controller; pub use controller::{ Controller, ExecuteController, ExecuteControllerWeightInfo, QueryController, - QueryControllerWeightInfo, SendController, SendControllerWeightInfo, + QueryControllerWeightInfo, QueryHandler, SendController, SendControllerWeightInfo, }; diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index e9adc48ff9c..90946b71311 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -1353,6 +1353,7 @@ impl pallet_contracts::Config for Runtime { type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; type Debug = (); type Environment = (); + type Xcm = (); } impl pallet_sudo::Config for Runtime { diff --git a/substrate/frame/contracts/Cargo.toml b/substrate/frame/contracts/Cargo.toml index 0eb50c2b0ba..239b0865e0f 100644 --- a/substrate/frame/contracts/Cargo.toml +++ b/substrate/frame/contracts/Cargo.toml @@ -48,6 +48,9 @@ sp-io = { path = "../../primitives/io", default-features = false} sp-runtime = { path = "../../primitives/runtime", default-features = false} sp-std = { path = "../../primitives/std", default-features = false} +xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false} +xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false} + [dev-dependencies] array-bytes = "6.1" assert_matches = "1" @@ -56,13 +59,19 @@ pretty_assertions = "1" wat = "1" 
pallet-contracts-fixtures = { path = "./fixtures" } +# Polkadot Dependencies +xcm-builder = {package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder"} + # Substrate Dependencies pallet-balances = { path = "../balances" } pallet-timestamp = { path = "../timestamp" } +pallet-message-queue = { path = "../message-queue" } pallet-insecure-randomness-collective-flip = { path = "../insecure-randomness-collective-flip" } pallet-utility = { path = "../utility" } +pallet-assets = { path = "../assets" } pallet-proxy = { path = "../proxy" } sp-keystore = { path = "../../primitives/keystore" } +sp-tracing = { path = "../../primitives/tracing" } [features] default = [ "std" ] @@ -92,12 +101,16 @@ std = [ "sp-std/std", "wasm-instrument/std", "wasmi/std", + "xcm-builder/std", + "xcm/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", "pallet-proxy/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-utility/runtime-benchmarks", @@ -105,12 +118,15 @@ runtime-benchmarks = [ "rand_pcg", "sp-runtime/runtime-benchmarks", "wasm-instrument", + "xcm-builder/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", + "pallet-assets/try-runtime", "pallet-balances/try-runtime", "pallet-insecure-randomness-collective-flip/try-runtime", + "pallet-message-queue/try-runtime", "pallet-proxy/try-runtime", "pallet-timestamp/try-runtime", "pallet-utility/try-runtime", diff --git a/substrate/frame/contracts/fixtures/data/xcm_execute.wat b/substrate/frame/contracts/fixtures/data/xcm_execute.wat new file mode 100644 index 00000000000..b3459996a2e --- /dev/null +++ b/substrate/frame/contracts/fixtures/data/xcm_execute.wat @@ -0,0 +1,52 @@ +;; This passes its input to `seal_xcm_execute` and returns the return value to its caller. +(module + (import "seal0" "xcm_execute" (func $xcm_execute (param i32 i32 i32) (result i32))) + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) + (import "env" "memory" (memory 1 1)) + + ;; 0x1000 = 4k in little endian + ;; Size of input buffer + (data (i32.const 0) "\00\10") + + (func $assert (param i32) + (block $ok + (br_if $ok + (get_local 0) + ) + (unreachable) + ) + ) + + (func (export "call") + ;; Receive the encoded call + (call $seal_input + (i32.const 4) ;; Pointer to the input buffer + (i32.const 0) ;; Pointer to the buffer length (before call) and to the copied data length (after call) + ) + ;; Input data layout. + ;; [0..4) - size of the call + ;; [4..) - message + + ;; Call xcm_execute with provided input. 
+ (call $assert + (i32.eq + (call $xcm_execute + (i32.const 4) ;; Pointer where the message is stored + (i32.load (i32.const 0)) ;; Size of the message + (i32.const 100) ;; Pointer to the where the outcome is stored + ) + (i32.const 0) + ) + ) + + (call $seal_return + (i32.const 0) ;; flags + (i32.const 100) ;; Pointer to returned value + (i32.const 10) ;; length of returned value + ) + ) + + (func (export "deploy")) +) + diff --git a/substrate/frame/contracts/fixtures/data/xcm_send.wat b/substrate/frame/contracts/fixtures/data/xcm_send.wat new file mode 100644 index 00000000000..9eec6388de9 --- /dev/null +++ b/substrate/frame/contracts/fixtures/data/xcm_send.wat @@ -0,0 +1,59 @@ +;; This passes its input to `seal_xcm_send` and returns the return value to its caller. +(module + (import "seal0" "xcm_send" (func $xcm_send (param i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) + (import "env" "memory" (memory 1 1)) + + ;; 0x1000 = 4k in little endian + ;; size of input buffer + (data (i32.const 0) "\00\10") + + (func $assert (param i32) + (block $ok + (br_if $ok + (get_local 0) + ) + (unreachable) + ) + ) + + (func (export "call") + + ;; Receive the encoded call + (call $seal_input + (i32.const 4) ;; Pointer to the input buffer + (i32.const 0) ;; Size of the length buffer + ) + + ;; Input data layout. + ;; [0..4) - size of the call + ;; [4..7) - dest + ;; [7..) - message + + ;; Call xcm_send with provided input. + (call $assert + (i32.eq + (call $xcm_send + (i32.const 4) ;; Pointer where the dest is stored + (i32.const 7) ;; Pointer where the message is stored + (i32.sub + (i32.load (i32.const 0)) ;; length of the input buffer + (i32.const 3) ;; Size of the XCM dest + ) + (i32.const 100) ;; Pointer to the where the message_id is stored + ) + (i32.const 0) + ) + ) + + ;; Return the the message_id + (call $seal_return + (i32.const 0) ;; flags + (i32.const 100) ;; Pointer to returned value + (i32.const 32) ;; length of returned value + ) + ) + + (func (export "deploy")) +) diff --git a/substrate/frame/contracts/fixtures/src/lib.rs b/substrate/frame/contracts/fixtures/src/lib.rs index 32f4023e644..48117f7ca94 100644 --- a/substrate/frame/contracts/fixtures/src/lib.rs +++ b/substrate/frame/contracts/fixtures/src/lib.rs @@ -23,6 +23,8 @@ fn fixtures_root_dir() -> PathBuf { // When `CARGO_MANIFEST_DIR` is not set, Rust resolves relative paths from the root folder (Err(_), _) => "substrate/frame/contracts/fixtures/data".into(), (Ok(path), Ok(s)) if s == "pallet-contracts" => PathBuf::from(path).join("fixtures/data"), + (Ok(path), Ok(s)) if s == "pallet-contracts-mock-network" => + PathBuf::from(path).parent().unwrap().join("fixtures/data"), (Ok(_), pkg_name) => panic!("Failed to resolve fixture dir for tests from {pkg_name:?}."), } } diff --git a/substrate/frame/contracts/mock-network/Cargo.toml b/substrate/frame/contracts/mock-network/Cargo.toml new file mode 100644 index 00000000000..9d5fe1aaf4e --- /dev/null +++ b/substrate/frame/contracts/mock-network/Cargo.toml @@ -0,0 +1,91 @@ +[package] +name = "pallet-contracts-mock-network" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license.workspace = true +homepage = "https://substrate.io" +repository.workspace = true +description = "A mock network for testing pallet-contracts" + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", 
"max-encoded-len"] } + +frame-support = { path = "../../support", default-features = false} +frame-system = { path = "../../system", default-features = false} +pallet-assets = { path = "../../assets" } +pallet-balances = { path = "../../balances" } +pallet-contracts = { path = ".." } +pallet-contracts-primitives = { path = "../primitives", default-features = false} +pallet-contracts-proc-macro = { path = "../proc-macro" } +pallet-insecure-randomness-collective-flip = { path = "../../insecure-randomness-collective-flip" } +pallet-message-queue = { path = "../../message-queue" } +pallet-proxy = { path = "../../proxy" } +pallet-timestamp = { path = "../../timestamp" } +pallet-utility = { path = "../../utility" } +pallet-xcm = { path = "../../../../polkadot/xcm/pallet-xcm", default-features = false} +polkadot-parachain-primitives = { path = "../../../../polkadot/parachain" } +polkadot-primitives = { path = "../../../../polkadot/primitives" } +polkadot-runtime-parachains = {path = "../../../../polkadot/runtime/parachains"} +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +sp-api = { path = "../../../primitives/api", default-features = false} +sp-core = { path = "../../../primitives/core", default-features = false} +sp-io = { path = "../../../primitives/io", default-features = false} +sp-keystore = { path = "../../../primitives/keystore" } +sp-runtime = { path = "../../../primitives/runtime", default-features = false} +sp-std = { path = "../../../primitives/std", default-features = false} +sp-tracing = { path = "../../../primitives/tracing" } +xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-features = false} +xcm-builder = {package = "staging-xcm-builder", path = "../../../../polkadot/xcm/xcm-builder"} +xcm-executor = { package = "staging-xcm-executor", path = "../../../../polkadot/xcm/xcm-executor", default-features = false} +xcm-simulator = {path = "../../../../polkadot/xcm/xcm-simulator"} + +[dev-dependencies] +assert_matches = "1" +pretty_assertions = "1" +pallet-contracts-fixtures = { path = "../fixtures" } + +[features] +default = [ "std" ] +std = [ + "codec/std", + "frame-support/std", + "frame-system/std", + "pallet-balances/std", + "pallet-contracts-primitives/std", + "pallet-contracts-proc-macro/full", + "pallet-contracts/std", + "pallet-insecure-randomness-collective-flip/std", + "pallet-proxy/std", + "pallet-timestamp/std", + "pallet-utility/std", + "pallet-xcm/std", + "scale-info/std", + "sp-api/std", + "sp-core/std", + "sp-io/std", + "sp-keystore/std", + "sp-runtime/std", + "sp-std/std", + "xcm-executor/std", + "xcm/std", +] + +runtime-benchmarks = [ + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-assets/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-contracts/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", + "pallet-proxy/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "pallet-utility/runtime-benchmarks", + "pallet-xcm/runtime-benchmarks", + "polkadot-parachain-primitives/runtime-benchmarks", + "polkadot-primitives/runtime-benchmarks", + "polkadot-runtime-parachains/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "xcm-builder/runtime-benchmarks", + "xcm-executor/runtime-benchmarks", +] diff --git a/substrate/frame/contracts/mock-network/src/lib.rs b/substrate/frame/contracts/mock-network/src/lib.rs new file mode 100644 index 00000000000..345c69541b6 --- /dev/null +++ 
b/substrate/frame/contracts/mock-network/src/lib.rs @@ -0,0 +1,151 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +pub mod mocks; +pub mod parachain; +pub mod primitives; +pub mod relay_chain; + +#[cfg(test)] +mod tests; + +use crate::primitives::{AccountId, UNITS}; +use sp_runtime::BuildStorage; +use xcm::latest::{prelude::*, MultiLocation}; +use xcm_executor::traits::ConvertLocation; +use xcm_simulator::{decl_test_network, decl_test_parachain, decl_test_relay_chain, TestExt}; + +// Accounts +pub const ADMIN: sp_runtime::AccountId32 = sp_runtime::AccountId32::new([0u8; 32]); +pub const ALICE: sp_runtime::AccountId32 = sp_runtime::AccountId32::new([1u8; 32]); +pub const BOB: sp_runtime::AccountId32 = sp_runtime::AccountId32::new([2u8; 32]); + +// Balances +pub const INITIAL_BALANCE: u128 = 1_000_000_000 * UNITS; + +decl_test_parachain! { + pub struct ParaA { + Runtime = parachain::Runtime, + XcmpMessageHandler = parachain::MsgQueue, + DmpMessageHandler = parachain::MsgQueue, + new_ext = para_ext(1), + } +} + +decl_test_relay_chain! { + pub struct Relay { + Runtime = relay_chain::Runtime, + RuntimeCall = relay_chain::RuntimeCall, + RuntimeEvent = relay_chain::RuntimeEvent, + XcmConfig = relay_chain::XcmConfig, + MessageQueue = relay_chain::MessageQueue, + System = relay_chain::System, + new_ext = relay_ext(), + } +} + +decl_test_network! 
{ + pub struct MockNet { + relay_chain = Relay, + parachains = vec![ + (1, ParaA), + ], + } +} + +pub fn relay_sovereign_account_id() -> AccountId { + let location: MultiLocation = (Parent,).into(); + parachain::SovereignAccountOf::convert_location(&location).unwrap() +} + +pub fn parachain_sovereign_account_id(para: u32) -> AccountId { + let location: MultiLocation = (Parachain(para),).into(); + relay_chain::SovereignAccountOf::convert_location(&location).unwrap() +} + +pub fn parachain_account_sovereign_account_id( + para: u32, + who: sp_runtime::AccountId32, +) -> AccountId { + let location: MultiLocation = ( + Parachain(para), + AccountId32 { network: Some(relay_chain::RelayNetwork::get()), id: who.into() }, + ) + .into(); + relay_chain::SovereignAccountOf::convert_location(&location).unwrap() +} + +pub fn para_ext(para_id: u32) -> sp_io::TestExternalities { + use parachain::{MsgQueue, Runtime, System}; + + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + + pallet_balances::GenesisConfig:: { + balances: vec![ + (ALICE, INITIAL_BALANCE), + (relay_sovereign_account_id(), INITIAL_BALANCE), + (BOB, INITIAL_BALANCE), + ], + } + .assimilate_storage(&mut t) + .unwrap(); + + pallet_assets::GenesisConfig:: { + assets: vec![ + (0u128, ADMIN, false, 1u128), // Create derivative asset for relay's native token + ], + metadata: Default::default(), + accounts: vec![ + (0u128, ALICE, INITIAL_BALANCE), + (0u128, relay_sovereign_account_id(), INITIAL_BALANCE), + ], + } + .assimilate_storage(&mut t) + .unwrap(); + + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| { + sp_tracing::try_init_simple(); + System::set_block_number(1); + MsgQueue::set_para_id(para_id.into()); + }); + ext +} + +pub fn relay_ext() -> sp_io::TestExternalities { + use relay_chain::{Runtime, System}; + + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + + pallet_balances::GenesisConfig:: { + balances: vec![ + (ALICE, INITIAL_BALANCE), + (parachain_sovereign_account_id(1), INITIAL_BALANCE), + (parachain_account_sovereign_account_id(1, ALICE), INITIAL_BALANCE), + ], + } + .assimilate_storage(&mut t) + .unwrap(); + + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| { + System::set_block_number(1); + }); + ext +} + +pub type ParachainPalletXcm = pallet_xcm::Pallet; +pub type ParachainBalances = pallet_balances::Pallet; diff --git a/substrate/frame/contracts/mock-network/src/mocks.rs b/substrate/frame/contracts/mock-network/src/mocks.rs new file mode 100644 index 00000000000..bf3baec7a52 --- /dev/null +++ b/substrate/frame/contracts/mock-network/src/mocks.rs @@ -0,0 +1,18 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
+ +pub mod msg_queue; +pub mod relay_message_queue; diff --git a/substrate/frame/contracts/mock-network/src/mocks/msg_queue.rs b/substrate/frame/contracts/mock-network/src/mocks/msg_queue.rs new file mode 100644 index 00000000000..82fb8590e26 --- /dev/null +++ b/substrate/frame/contracts/mock-network/src/mocks/msg_queue.rs @@ -0,0 +1,168 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Parachain runtime mock. + +use codec::{Decode, Encode}; + +use frame_support::weights::Weight; +use polkadot_parachain_primitives::primitives::{ + DmpMessageHandler, Id as ParaId, XcmpMessageFormat, XcmpMessageHandler, +}; +use polkadot_primitives::BlockNumber as RelayBlockNumber; +use sp_runtime::traits::{Get, Hash}; + +use sp_std::prelude::*; +use xcm::{latest::prelude::*, VersionedXcm}; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type XcmExecutor: ExecuteXcm; + } + + #[pallet::call] + impl Pallet {} + + #[pallet::pallet] + #[pallet::without_storage_info] + pub struct Pallet(_); + + #[pallet::storage] + #[pallet::getter(fn parachain_id)] + pub(super) type ParachainId = StorageValue<_, ParaId, ValueQuery>; + + #[pallet::storage] + #[pallet::getter(fn received_dmp)] + /// A queue of received DMP messages + pub(super) type ReceivedDmp = StorageValue<_, Vec>, ValueQuery>; + + impl Get for Pallet { + fn get() -> ParaId { + Self::parachain_id() + } + } + + pub type MessageId = [u8; 32]; + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// Some XCM was executed OK. + Success(Option), + /// Some XCM failed. + Fail(Option, XcmError), + /// Bad XCM version used. + BadVersion(Option), + /// Bad XCM format used. + BadFormat(Option), + + // DMP + /// Downward message is invalid XCM. + InvalidFormat(MessageId), + /// Downward message is unsupported version of XCM. + UnsupportedVersion(MessageId), + /// Downward message executed with the given outcome. 
+ ExecutedDownward(MessageId, Outcome), + } + + impl Pallet { + pub fn set_para_id(para_id: ParaId) { + ParachainId::::put(para_id); + } + + fn handle_xcmp_message( + sender: ParaId, + _sent_at: RelayBlockNumber, + xcm: VersionedXcm, + max_weight: Weight, + ) -> Result { + let hash = Encode::using_encoded(&xcm, T::Hashing::hash); + let message_hash = Encode::using_encoded(&xcm, sp_io::hashing::blake2_256); + let (result, event) = match Xcm::::try_from(xcm) { + Ok(xcm) => { + let location = (Parent, Parachain(sender.into())); + match T::XcmExecutor::execute_xcm(location, xcm, message_hash, max_weight) { + Outcome::Error(e) => (Err(e), Event::Fail(Some(hash), e)), + Outcome::Complete(w) => (Ok(w), Event::Success(Some(hash))), + // As far as the caller is concerned, this was dispatched without error, so + // we just report the weight used. + Outcome::Incomplete(w, e) => (Ok(w), Event::Fail(Some(hash), e)), + } + }, + Err(()) => (Err(XcmError::UnhandledXcmVersion), Event::BadVersion(Some(hash))), + }; + Self::deposit_event(event); + result + } + } + + impl XcmpMessageHandler for Pallet { + fn handle_xcmp_messages<'a, I: Iterator>( + iter: I, + max_weight: Weight, + ) -> Weight { + for (sender, sent_at, data) in iter { + let mut data_ref = data; + let _ = XcmpMessageFormat::decode(&mut data_ref) + .expect("Simulator encodes with versioned xcm format; qed"); + + let mut remaining_fragments = data_ref; + while !remaining_fragments.is_empty() { + if let Ok(xcm) = + VersionedXcm::::decode(&mut remaining_fragments) + { + let _ = Self::handle_xcmp_message(sender, sent_at, xcm, max_weight); + } else { + debug_assert!(false, "Invalid incoming XCMP message data"); + } + } + } + max_weight + } + } + + impl DmpMessageHandler for Pallet { + fn handle_dmp_messages( + iter: impl Iterator)>, + limit: Weight, + ) -> Weight { + for (_i, (_sent_at, data)) in iter.enumerate() { + let id = sp_io::hashing::blake2_256(&data[..]); + let maybe_versioned = VersionedXcm::::decode(&mut &data[..]); + match maybe_versioned { + Err(_) => { + Self::deposit_event(Event::InvalidFormat(id)); + }, + Ok(versioned) => match Xcm::try_from(versioned) { + Err(()) => Self::deposit_event(Event::UnsupportedVersion(id)), + Ok(x) => { + let outcome = T::XcmExecutor::execute_xcm(Parent, x.clone(), id, limit); + >::append(x); + Self::deposit_event(Event::ExecutedDownward(id, outcome)); + }, + }, + } + } + limit + } + } +} diff --git a/substrate/frame/contracts/mock-network/src/mocks/relay_message_queue.rs b/substrate/frame/contracts/mock-network/src/mocks/relay_message_queue.rs new file mode 100644 index 00000000000..14099965e3f --- /dev/null +++ b/substrate/frame/contracts/mock-network/src/mocks/relay_message_queue.rs @@ -0,0 +1,52 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
+ +use frame_support::{parameter_types, weights::Weight}; +use xcm::latest::prelude::*; +use xcm_simulator::{ + AggregateMessageOrigin, ProcessMessage, ProcessMessageError, UmpQueueId, WeightMeter, +}; + +use crate::relay_chain::{RuntimeCall, XcmConfig}; + +parameter_types! { + /// Amount of weight that can be spent per block to service messages. + pub MessageQueueServiceWeight: Weight = Weight::from_parts(1_000_000_000, 1_000_000); + pub const MessageQueueHeapSize: u32 = 65_536; + pub const MessageQueueMaxStale: u32 = 16; +} + +/// Message processor to handle any messages that were enqueued into the `MessageQueue` pallet. +pub struct MessageProcessor; +impl ProcessMessage for MessageProcessor { + type Origin = AggregateMessageOrigin; + + fn process_message( + message: &[u8], + origin: Self::Origin, + meter: &mut WeightMeter, + id: &mut [u8; 32], + ) -> Result { + let para = match origin { + AggregateMessageOrigin::Ump(UmpQueueId::Para(para)) => para, + }; + xcm_builder::ProcessXcmMessage::< + Junction, + xcm_executor::XcmExecutor, + RuntimeCall, + >::process_message(message, Junction::Parachain(para.into()), meter, id) + } +} diff --git a/substrate/frame/contracts/mock-network/src/parachain.rs b/substrate/frame/contracts/mock-network/src/parachain.rs new file mode 100644 index 00000000000..1465b02f903 --- /dev/null +++ b/substrate/frame/contracts/mock-network/src/parachain.rs @@ -0,0 +1,353 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Parachain runtime mock. + +mod contracts_config; +use crate::{ + mocks::msg_queue::pallet as mock_msg_queue, + primitives::{AccountId, AssetIdForAssets, Balance}, +}; +use core::marker::PhantomData; +use frame_support::{ + construct_runtime, parameter_types, + traits::{AsEnsureOriginWithArg, Contains, ContainsPair, Everything, EverythingBut, Nothing}, + weights::{ + constants::{WEIGHT_PROOF_SIZE_PER_MB, WEIGHT_REF_TIME_PER_SECOND}, + Weight, + }, +}; +use frame_system::{EnsureRoot, EnsureSigned}; +use pallet_xcm::XcmPassthrough; +use sp_core::{ConstU32, ConstU64, H256}; +use sp_runtime::traits::{Get, IdentityLookup, MaybeEquivalence}; + +use sp_std::prelude::*; +use xcm::latest::prelude::*; +use xcm_builder::{ + AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowTopLevelPaidExecutionFrom, + ConvertedConcreteId, CurrencyAdapter as XcmCurrencyAdapter, EnsureXcmOrigin, + FixedRateOfFungible, FixedWeightBounds, FungiblesAdapter, IsConcrete, NativeAsset, NoChecking, + ParentAsSuperuser, ParentIsPreset, SignedAccountId32AsNative, SignedToAccountId32, + SovereignSignedViaLocation, WithComputedOrigin, +}; +use xcm_executor::{traits::JustTry, Config, XcmExecutor}; + +pub type SovereignAccountOf = + (AccountId32Aliases, ParentIsPreset); + +parameter_types! 
{ + pub const BlockHashCount: u64 = 250; +} + +impl frame_system::Config for Runtime { + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Nonce = u64; + type Block = Block; + type Hash = H256; + type Hashing = ::sp_runtime::traits::BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = BlockHashCount; + type BlockWeights = (); + type BlockLength = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type DbWeight = (); + type BaseCallFilter = Everything; + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = ConstU32<16>; +} + +parameter_types! { + pub ExistentialDeposit: Balance = 1; + pub const MaxLocks: u32 = 50; + pub const MaxReserves: u32 = 50; +} + +impl pallet_balances::Config for Runtime { + type AccountStore = System; + type Balance = Balance; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type FreezeIdentifier = (); + type MaxFreezes = ConstU32<0>; + type MaxHolds = ConstU32<1>; + type MaxLocks = MaxLocks; + type MaxReserves = MaxReserves; + type ReserveIdentifier = [u8; 8]; + type RuntimeEvent = RuntimeEvent; + type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; + type WeightInfo = (); +} + +parameter_types! { + pub const AssetDeposit: u128 = 1_000_000; + pub const MetadataDepositBase: u128 = 1_000_000; + pub const MetadataDepositPerByte: u128 = 100_000; + pub const AssetAccountDeposit: u128 = 1_000_000; + pub const ApprovalDeposit: u128 = 1_000_000; + pub const AssetsStringLimit: u32 = 50; + pub const RemoveItemsLimit: u32 = 50; +} + +impl pallet_assets::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Balance = Balance; + type AssetId = AssetIdForAssets; + type Currency = Balances; + type CreateOrigin = AsEnsureOriginWithArg>; + type ForceOrigin = EnsureRoot; + type AssetDeposit = AssetDeposit; + type MetadataDepositBase = MetadataDepositBase; + type MetadataDepositPerByte = MetadataDepositPerByte; + type AssetAccountDeposit = AssetAccountDeposit; + type ApprovalDeposit = ApprovalDeposit; + type StringLimit = AssetsStringLimit; + type Freezer = (); + type Extra = (); + type WeightInfo = (); + type RemoveItemsLimit = RemoveItemsLimit; + type AssetIdParameter = AssetIdForAssets; + type CallbackHandle = (); + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = (); +} + +parameter_types! { + pub const ReservedXcmpWeight: Weight = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND.saturating_div(4), 0); + pub const ReservedDmpWeight: Weight = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND.saturating_div(4), 0); +} + +parameter_types! { + pub const KsmLocation: MultiLocation = MultiLocation::parent(); + pub const TokenLocation: MultiLocation = Here.into_location(); + pub const RelayNetwork: NetworkId = ByGenesis([0; 32]); + pub UniversalLocation: InteriorMultiLocation = Parachain(MsgQueue::parachain_id().into()).into(); +} + +pub type XcmOriginToCallOrigin = ( + SovereignSignedViaLocation, + ParentAsSuperuser, + SignedAccountId32AsNative, + XcmPassthrough, +); + +parameter_types! 
{ + pub const XcmInstructionWeight: Weight = Weight::from_parts(1_000, 1_000); + pub TokensPerSecondPerMegabyte: (AssetId, u128, u128) = (Concrete(Parent.into()), 1_000_000_000_000, 1024 * 1024); + pub const MaxInstructions: u32 = 100; + pub const MaxAssetsIntoHolding: u32 = 64; + pub ForeignPrefix: MultiLocation = (Parent,).into(); + pub CheckingAccount: AccountId = PolkadotXcm::check_account(); + pub TrustedLockPairs: (MultiLocation, MultiAssetFilter) = + (Parent.into(), Wild(AllOf { id: Concrete(Parent.into()), fun: WildFungible })); +} + +pub fn estimate_message_fee(number_of_instructions: u64) -> u128 { + let weight = estimate_weight(number_of_instructions); + + estimate_fee_for_weight(weight) +} + +pub fn estimate_weight(number_of_instructions: u64) -> Weight { + XcmInstructionWeight::get().saturating_mul(number_of_instructions) +} + +pub fn estimate_fee_for_weight(weight: Weight) -> u128 { + let (_, units_per_second, units_per_mb) = TokensPerSecondPerMegabyte::get(); + + units_per_second * (weight.ref_time() as u128) / (WEIGHT_REF_TIME_PER_SECOND as u128) + + units_per_mb * (weight.proof_size() as u128) / (WEIGHT_PROOF_SIZE_PER_MB as u128) +} + +pub type LocalBalancesTransactor = + XcmCurrencyAdapter, SovereignAccountOf, AccountId, ()>; + +pub struct FromMultiLocationToAsset(PhantomData<(MultiLocation, AssetId)>); +impl MaybeEquivalence + for FromMultiLocationToAsset +{ + fn convert(value: &MultiLocation) -> Option { + match *value { + MultiLocation { parents: 1, interior: Here } => Some(0 as AssetIdForAssets), + MultiLocation { parents: 1, interior: X1(Parachain(para_id)) } => + Some(para_id as AssetIdForAssets), + _ => None, + } + } + + fn convert_back(_id: &AssetIdForAssets) -> Option { + None + } +} + +pub type ForeignAssetsTransactor = FungiblesAdapter< + Assets, + ConvertedConcreteId< + AssetIdForAssets, + Balance, + FromMultiLocationToAsset, + JustTry, + >, + SovereignAccountOf, + AccountId, + NoChecking, + CheckingAccount, +>; + +/// Means for transacting assets on this chain +pub type AssetTransactors = (LocalBalancesTransactor, ForeignAssetsTransactor); + +pub struct ParentRelay; +impl Contains for ParentRelay { + fn contains(location: &MultiLocation) -> bool { + location.contains_parents_only(1) + } +} +pub struct ThisParachain; +impl Contains for ThisParachain { + fn contains(location: &MultiLocation) -> bool { + matches!( + location, + MultiLocation { parents: 0, interior: Junctions::X1(Junction::AccountId32 { .. }) } + ) + } +} + +pub type XcmRouter = crate::ParachainXcmRouter; + +pub type Barrier = ( + xcm_builder::AllowUnpaidExecutionFrom, + WithComputedOrigin< + (AllowExplicitUnpaidExecutionFrom, AllowTopLevelPaidExecutionFrom), + UniversalLocation, + ConstU32<1>, + >, +); + +parameter_types! 
{ + pub NftCollectionOne: MultiAssetFilter + = Wild(AllOf { fun: WildNonFungible, id: Concrete((Parent, GeneralIndex(1)).into()) }); + pub NftCollectionOneForRelay: (MultiAssetFilter, MultiLocation) + = (NftCollectionOne::get(), Parent.into()); + pub RelayNativeAsset: MultiAssetFilter = Wild(AllOf { fun: WildFungible, id: Concrete((Parent, Here).into()) }); + pub RelayNativeAssetForRelay: (MultiAssetFilter, MultiLocation) = (RelayNativeAsset::get(), Parent.into()); +} +pub type TrustedTeleporters = + (xcm_builder::Case, xcm_builder::Case); +pub type TrustedReserves = EverythingBut>; + +pub struct XcmConfig; +impl Config for XcmConfig { + type RuntimeCall = RuntimeCall; + type XcmSender = XcmRouter; + type AssetTransactor = AssetTransactors; + type OriginConverter = XcmOriginToCallOrigin; + type IsReserve = (NativeAsset, TrustedReserves); + type IsTeleporter = TrustedTeleporters; + type UniversalLocation = UniversalLocation; + type Barrier = Barrier; + type Weigher = FixedWeightBounds; + type Trader = FixedRateOfFungible; + type ResponseHandler = PolkadotXcm; + type AssetTrap = PolkadotXcm; + type AssetLocker = PolkadotXcm; + type AssetExchanger = (); + type AssetClaims = PolkadotXcm; + type SubscriptionService = PolkadotXcm; + type PalletInstancesInfo = AllPalletsWithSystem; + type FeeManager = (); + type MaxAssetsIntoHolding = MaxAssetsIntoHolding; + type MessageExporter = (); + type UniversalAliases = Nothing; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; + type Aliasers = Nothing; +} + +impl mock_msg_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type XcmExecutor = XcmExecutor; +} + +pub type LocalOriginToLocation = SignedToAccountId32; + +pub struct TrustedLockerCase(PhantomData); +impl> ContainsPair + for TrustedLockerCase +{ + fn contains(origin: &MultiLocation, asset: &MultiAsset) -> bool { + let (o, a) = T::get(); + a.matches(asset) && &o == origin + } +} + +impl pallet_xcm::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type SendXcmOrigin = EnsureXcmOrigin; + type XcmRouter = XcmRouter; + type ExecuteXcmOrigin = EnsureXcmOrigin; + type XcmExecuteFilter = Everything; + type XcmExecutor = XcmExecutor; + type XcmTeleportFilter = Nothing; + type XcmReserveTransferFilter = Everything; + type Weigher = FixedWeightBounds; + type UniversalLocation = UniversalLocation; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; + type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; + type Currency = Balances; + type CurrencyMatcher = IsConcrete; + type TrustedLockers = TrustedLockerCase; + type SovereignAccountOf = SovereignAccountOf; + type MaxLockers = ConstU32<8>; + type MaxRemoteLockConsumers = ConstU32<0>; + type RemoteLockConsumerIdentifier = (); + type WeightInfo = pallet_xcm::TestWeightInfo; + type AdminOrigin = EnsureRoot; +} + +type Block = frame_system::mocking::MockBlock; + +impl pallet_timestamp::Config for Runtime { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = ConstU64<1>; + type WeightInfo = (); +} + +construct_runtime!( + pub enum Runtime + { + System: frame_system, + Balances: pallet_balances, + Timestamp: pallet_timestamp, + MsgQueue: mock_msg_queue, + PolkadotXcm: pallet_xcm, + Contracts: pallet_contracts, + Assets: pallet_assets, + } +); diff --git a/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs b/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs new file mode 100644 
index 00000000000..dadba394e26 --- /dev/null +++ b/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs @@ -0,0 +1,98 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use super::{Balances, Runtime, RuntimeCall, RuntimeEvent}; +use crate::{ + parachain, + parachain::RuntimeHoldReason, + primitives::{Balance, CENTS}, +}; +use frame_support::{ + parameter_types, + traits::{ConstBool, ConstU32, Contains, Randomness}, + weights::Weight, +}; +use frame_system::pallet_prelude::BlockNumberFor; +use pallet_xcm::BalanceOf; +use sp_runtime::{traits::Convert, Perbill}; + +pub const fn deposit(items: u32, bytes: u32) -> Balance { + items as Balance * 1 * CENTS + (bytes as Balance) * 1 * CENTS +} + +parameter_types! { + pub const DepositPerItem: Balance = deposit(1, 0); + pub const DepositPerByte: Balance = deposit(0, 1); + pub const DefaultDepositLimit: Balance = deposit(1024, 1024 * 1024); + pub Schedule: pallet_contracts::Schedule = Default::default(); + pub const CodeHashLockupDepositPercent: Perbill = Perbill::from_percent(0); + pub const MaxDelegateDependencies: u32 = 32; +} + +pub struct DummyRandomness(sp_std::marker::PhantomData); + +impl Randomness> for DummyRandomness { + fn random(_subject: &[u8]) -> (T::Hash, BlockNumberFor) { + (Default::default(), Default::default()) + } +} + +impl Convert> for Runtime { + fn convert(w: Weight) -> BalanceOf { + w.ref_time().into() + } +} + +#[derive(Clone, Default)] +pub struct Filters; + +impl Contains for Filters { + fn contains(call: &RuntimeCall) -> bool { + match call { + parachain::RuntimeCall::Contracts(_) => true, + _ => false, + } + } +} + +impl pallet_contracts::Config for Runtime { + type AddressGenerator = pallet_contracts::DefaultAddressGenerator; + type CallFilter = Filters; + type CallStack = [pallet_contracts::Frame; 5]; + type ChainExtension = (); + type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; + type Currency = Balances; + type DefaultDepositLimit = DefaultDepositLimit; + type DepositPerByte = DepositPerByte; + type DepositPerItem = DepositPerItem; + type MaxCodeLen = ConstU32<{ 123 * 1024 }>; + type MaxDebugBufferLen = ConstU32<{ 2 * 1024 * 1024 }>; + type MaxDelegateDependencies = MaxDelegateDependencies; + type MaxStorageKeyLen = ConstU32<128>; + type Migrations = (); + type Randomness = DummyRandomness; + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type RuntimeHoldReason = RuntimeHoldReason; + type Schedule = Schedule; + type Time = super::Timestamp; + type UnsafeUnstableInterface = ConstBool; + type WeightInfo = (); + type WeightPrice = Self; + type Debug = (); + type Environment = (); + type Xcm = pallet_xcm::Pallet; +} diff --git a/substrate/frame/contracts/mock-network/src/primitives.rs b/substrate/frame/contracts/mock-network/src/primitives.rs new file mode 100644 index 00000000000..efc42772f88 --- /dev/null +++ 
b/substrate/frame/contracts/mock-network/src/primitives.rs @@ -0,0 +1,23 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +pub type Balance = u128; + +pub const UNITS: Balance = 10_000_000_000; +pub const CENTS: Balance = UNITS / 100; // 100_000_000 + +pub type AccountId = sp_runtime::AccountId32; +pub type AssetIdForAssets = u128; diff --git a/substrate/frame/contracts/mock-network/src/relay_chain.rs b/substrate/frame/contracts/mock-network/src/relay_chain.rs new file mode 100644 index 00000000000..c59c8e4bfa8 --- /dev/null +++ b/substrate/frame/contracts/mock-network/src/relay_chain.rs @@ -0,0 +1,236 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Relay chain runtime mock. + +use frame_support::{ + construct_runtime, parameter_types, + traits::{Contains, Everything, Nothing}, + weights::Weight, +}; + +use frame_system::EnsureRoot; +use sp_core::{ConstU32, H256}; +use sp_runtime::traits::IdentityLookup; + +use polkadot_parachain_primitives::primitives::Id as ParaId; +use polkadot_runtime_parachains::{configuration, origin, shared}; +use xcm::latest::prelude::*; +use xcm_builder::{ + AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowSubscriptionsFrom, + AllowTopLevelPaidExecutionFrom, ChildParachainAsNative, ChildParachainConvertsVia, + ChildSystemParachainAsSuperuser, CurrencyAdapter as XcmCurrencyAdapter, DescribeAllTerminal, + DescribeFamily, FixedRateOfFungible, FixedWeightBounds, HashedDescription, IsConcrete, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, WithComputedOrigin, +}; +use xcm_executor::{Config, XcmExecutor}; + +use super::{ + mocks::relay_message_queue::*, + primitives::{AccountId, Balance}, +}; + +parameter_types! 
{ + pub const BlockHashCount: u64 = 250; +} + +impl frame_system::Config for Runtime { + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Block = Block; + type Nonce = u64; + type Hash = H256; + type Hashing = ::sp_runtime::traits::BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = BlockHashCount; + type BlockWeights = (); + type BlockLength = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type DbWeight = (); + type BaseCallFilter = Everything; + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = ConstU32<16>; +} + +parameter_types! { + pub ExistentialDeposit: Balance = 1; + pub const MaxLocks: u32 = 50; + pub const MaxReserves: u32 = 50; +} + +impl pallet_balances::Config for Runtime { + type MaxLocks = MaxLocks; + type Balance = Balance; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); + type MaxReserves = MaxReserves; + type ReserveIdentifier = [u8; 8]; + type FreezeIdentifier = (); + type MaxHolds = ConstU32<0>; + type MaxFreezes = ConstU32<0>; + type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; +} + +impl shared::Config for Runtime {} + +impl configuration::Config for Runtime { + type WeightInfo = configuration::TestWeightInfo; +} + +parameter_types! { + pub RelayNetwork: NetworkId = ByGenesis([0; 32]); + pub const TokenLocation: MultiLocation = Here.into_location(); + pub UniversalLocation: InteriorMultiLocation = Here; + pub UnitWeightCost: u64 = 1_000; +} + +pub type SovereignAccountOf = ( + HashedDescription>, + AccountId32Aliases, + ChildParachainConvertsVia, +); + +pub type LocalBalancesTransactor = + XcmCurrencyAdapter, SovereignAccountOf, AccountId, ()>; + +pub type AssetTransactors = LocalBalancesTransactor; + +type LocalOriginConverter = ( + SovereignSignedViaLocation, + ChildParachainAsNative, + SignedAccountId32AsNative, + ChildSystemParachainAsSuperuser, +); + +parameter_types! 
{ + pub const XcmInstructionWeight: Weight = Weight::from_parts(1_000, 1_000); + pub TokensPerSecondPerMegabyte: (AssetId, u128, u128) = + (Concrete(TokenLocation::get()), 1_000_000_000_000, 1024 * 1024); + pub const MaxInstructions: u32 = 100; + pub const MaxAssetsIntoHolding: u32 = 64; +} + +pub struct ChildrenParachains; +impl Contains for ChildrenParachains { + fn contains(location: &MultiLocation) -> bool { + matches!(location, MultiLocation { parents: 0, interior: X1(Parachain(_)) }) + } +} + +pub type XcmRouter = crate::RelayChainXcmRouter; +pub type Barrier = WithComputedOrigin< + ( + AllowExplicitUnpaidExecutionFrom, + AllowTopLevelPaidExecutionFrom, + AllowSubscriptionsFrom, + ), + UniversalLocation, + ConstU32<1>, +>; + +pub struct XcmConfig; +impl Config for XcmConfig { + type RuntimeCall = RuntimeCall; + type XcmSender = XcmRouter; + type AssetTransactor = AssetTransactors; + type OriginConverter = LocalOriginConverter; + type IsReserve = (); + type IsTeleporter = (); + type UniversalLocation = UniversalLocation; + type Barrier = Barrier; + type Weigher = FixedWeightBounds; + type Trader = FixedRateOfFungible; + type ResponseHandler = XcmPallet; + type AssetTrap = XcmPallet; + type AssetLocker = XcmPallet; + type AssetExchanger = (); + type AssetClaims = XcmPallet; + type SubscriptionService = XcmPallet; + type PalletInstancesInfo = AllPalletsWithSystem; + type FeeManager = (); + type MaxAssetsIntoHolding = MaxAssetsIntoHolding; + type MessageExporter = (); + type UniversalAliases = Nothing; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; + type Aliasers = Nothing; +} + +pub type LocalOriginToLocation = SignedToAccountId32; + +impl pallet_xcm::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; + type XcmRouter = XcmRouter; + type ExecuteXcmOrigin = xcm_builder::EnsureXcmOrigin; + type XcmExecuteFilter = Everything; + type XcmExecutor = XcmExecutor; + type XcmTeleportFilter = Everything; + type XcmReserveTransferFilter = Everything; + type Weigher = FixedWeightBounds; + type UniversalLocation = UniversalLocation; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; + type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; + type Currency = Balances; + type CurrencyMatcher = IsConcrete; + type TrustedLockers = (); + type SovereignAccountOf = SovereignAccountOf; + type MaxLockers = ConstU32<8>; + type MaxRemoteLockConsumers = ConstU32<0>; + type RemoteLockConsumerIdentifier = (); + type WeightInfo = pallet_xcm::TestWeightInfo; + type AdminOrigin = EnsureRoot; +} + +impl origin::Config for Runtime {} + +type Block = frame_system::mocking::MockBlock; + +impl pallet_message_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Size = u32; + type HeapSize = MessageQueueHeapSize; + type MaxStale = MessageQueueMaxStale; + type ServiceWeight = MessageQueueServiceWeight; + type MessageProcessor = MessageProcessor; + type QueueChangeHandler = (); + type WeightInfo = (); + type QueuePausedQuery = (); +} + +construct_runtime!( + pub enum Runtime { + System: frame_system::{Pallet, Call, Storage, Config, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + ParasOrigin: origin::{Pallet, Origin}, + XcmPallet: pallet_xcm::{Pallet, Call, Storage, Event, Origin}, + MessageQueue: pallet_message_queue::{Pallet, Event}, + } +); diff --git a/substrate/frame/contracts/mock-network/src/tests.rs 
b/substrate/frame/contracts/mock-network/src/tests.rs new file mode 100644 index 00000000000..5193f657055 --- /dev/null +++ b/substrate/frame/contracts/mock-network/src/tests.rs @@ -0,0 +1,239 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{ + parachain::{self, Runtime}, + parachain_account_sovereign_account_id, + primitives::{AccountId, CENTS}, + relay_chain, MockNet, ParaA, ParachainBalances, Relay, ALICE, BOB, INITIAL_BALANCE, +}; +use assert_matches::assert_matches; +use codec::{Decode, Encode}; +use frame_support::{ + assert_err, + pallet_prelude::Weight, + traits::{fungibles::Mutate, Currency}, +}; +use pallet_balances::{BalanceLock, Reasons}; +use pallet_contracts::{CollectEvents, DebugInfo, Determinism}; +use pallet_contracts_fixtures::compile_module; +use pallet_contracts_primitives::Code; +use xcm::{v3::prelude::*, VersionedMultiLocation, VersionedXcm}; +use xcm_simulator::TestExt; + +type ParachainContracts = pallet_contracts::Pallet; + +/// Instantiate the tests contract, and fund it with some balance and assets. +fn instantiate_test_contract(name: &str) -> AccountId { + let (wasm, _) = compile_module::(name).unwrap(); + + // Instantiate contract. + let contract_addr = ParaA::execute_with(|| { + ParachainContracts::bare_instantiate( + ALICE, + 0, + Weight::MAX, + None, + Code::Upload(wasm), + vec![], + vec![], + DebugInfo::UnsafeDebug, + CollectEvents::Skip, + ) + .result + .unwrap() + .account_id + }); + + // Funds contract account with some balance and assets. + ParaA::execute_with(|| { + parachain::Balances::make_free_balance_be(&contract_addr, INITIAL_BALANCE); + parachain::Assets::mint_into(0u32.into(), &contract_addr, INITIAL_BALANCE).unwrap(); + }); + Relay::execute_with(|| { + let sovereign_account = parachain_account_sovereign_account_id(1u32, contract_addr.clone()); + relay_chain::Balances::make_free_balance_be(&sovereign_account, INITIAL_BALANCE); + }); + + contract_addr +} + +#[test] +fn test_xcm_execute() { + MockNet::reset(); + + let contract_addr = instantiate_test_contract("xcm_execute"); + + // Execute XCM instructions through the contract. + ParaA::execute_with(|| { + let amount: u128 = 10 * CENTS; + + // The XCM used to transfer funds to Bob. 
+ let message: xcm_simulator::Xcm<()> = Xcm(vec![ + WithdrawAsset(vec![(Here, amount).into()].into()), + DepositAsset { + assets: All.into(), + beneficiary: AccountId32 { network: None, id: BOB.clone().into() }.into(), + }, + ]); + + let result = ParachainContracts::bare_call( + ALICE, + contract_addr.clone(), + 0, + Weight::MAX, + None, + VersionedXcm::V3(message).encode(), + DebugInfo::UnsafeDebug, + CollectEvents::UnsafeCollect, + Determinism::Enforced, + ) + .result + .unwrap(); + + let mut data = &result.data[..]; + let outcome = Outcome::decode(&mut data).expect("Failed to decode xcm_execute Outcome"); + assert_matches!(outcome, Outcome::Complete(_)); + + // Check if the funds are subtracted from the account of Alice and added to the account of + // Bob. + let initial = INITIAL_BALANCE; + assert_eq!(parachain::Assets::balance(0, contract_addr), initial); + assert_eq!(ParachainBalances::free_balance(BOB), initial + amount); + }); +} + +#[test] +fn test_xcm_execute_filtered_call() { + MockNet::reset(); + + let contract_addr = instantiate_test_contract("xcm_execute"); + + ParaA::execute_with(|| { + // `remark` should be rejected, as it is not allowed by our CallFilter. + let call = parachain::RuntimeCall::System(frame_system::Call::remark { remark: vec![] }); + let message: Xcm = Xcm(vec![Transact { + origin_kind: OriginKind::Native, + require_weight_at_most: Weight::MAX, + call: call.encode().into(), + }]); + + let result = ParachainContracts::bare_call( + ALICE, + contract_addr.clone(), + 0, + Weight::MAX, + None, + VersionedXcm::V3(message).encode(), + DebugInfo::UnsafeDebug, + CollectEvents::UnsafeCollect, + Determinism::Enforced, + ); + + assert_err!(result.result, frame_system::Error::::CallFiltered); + }); +} + +#[test] +fn test_xcm_execute_reentrant_call() { + MockNet::reset(); + + let contract_addr = instantiate_test_contract("xcm_execute"); + + ParaA::execute_with(|| { + let transact_call = parachain::RuntimeCall::Contracts(pallet_contracts::Call::call { + dest: contract_addr.clone(), + gas_limit: 1_000_000.into(), + storage_deposit_limit: None, + data: vec![], + value: 0u128, + }); + + // The XCM used to transfer funds to Bob. + let message: Xcm = Xcm(vec![ + Transact { + origin_kind: OriginKind::Native, + require_weight_at_most: 1_000_000_000.into(), + call: transact_call.encode().into(), + }, + ExpectTransactStatus(MaybeErrorCode::Success), + ]); + + let result = ParachainContracts::bare_call( + ALICE, + contract_addr.clone(), + 0, + Weight::MAX, + None, + VersionedXcm::V3(message).encode(), + DebugInfo::UnsafeDebug, + CollectEvents::UnsafeCollect, + Determinism::Enforced, + ) + .result + .unwrap(); + + let mut data = &result.data[..]; + let outcome = Outcome::decode(&mut data).expect("Failed to decode xcm_execute Outcome"); + assert_matches!(outcome, Outcome::Incomplete(_, XcmError::ExpectationFalse)); + + // Funds should not change hands as the XCM transact failed. + assert_eq!(ParachainBalances::free_balance(BOB), INITIAL_BALANCE); + }); +} + +#[test] +fn test_xcm_send() { + MockNet::reset(); + let contract_addr = instantiate_test_contract("xcm_send"); + let fee = parachain::estimate_message_fee(4); // Accounts for the `DescendOrigin` instruction added by `send_xcm` + + // Send XCM instructions through the contract, to lock some funds on the relay chain. 
+ ParaA::execute_with(|| { + let dest = MultiLocation::from(Parent); + let dest = VersionedMultiLocation::V3(dest); + + let message: xcm_simulator::Xcm<()> = Xcm(vec![ + WithdrawAsset((Here, fee).into()), + BuyExecution { fees: (Here, fee).into(), weight_limit: WeightLimit::Unlimited }, + LockAsset { asset: (Here, 5 * CENTS).into(), unlocker: (Parachain(1)).into() }, + ]); + let message = VersionedXcm::V3(message); + let exec = ParachainContracts::bare_call( + ALICE, + contract_addr.clone(), + 0, + Weight::MAX, + None, + (dest, message).encode(), + DebugInfo::UnsafeDebug, + CollectEvents::UnsafeCollect, + Determinism::Enforced, + ); + + let mut data = &exec.result.unwrap().data[..]; + XcmHash::decode(&mut data).expect("Failed to decode xcm_send message_id"); + }); + + Relay::execute_with(|| { + // Check if the funds are locked on the relay chain. + assert_eq!( + relay_chain::Balances::locks(¶chain_account_sovereign_account_id(1, contract_addr)), + vec![BalanceLock { id: *b"py/xcmlk", amount: 5 * CENTS, reasons: Reasons::All }] + ); + }); +} diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs index 7d516fbe249..188679dbf49 100644 --- a/substrate/frame/contracts/src/lib.rs +++ b/substrate/frame/contracts/src/lib.rs @@ -403,6 +403,14 @@ pub mod pallet { /// its type appears in the metadata. Only valid value is `()`. #[pallet::constant] type Environment: Get>; + + /// A type that exposes XCM APIs, allowing contracts to interact with other parachains, and + /// execute XCM programs. + type Xcm: xcm_builder::Controller< + OriginFor, + ::RuntimeCall, + BlockNumberFor, + >; } #[pallet::hooks] @@ -1004,6 +1012,8 @@ pub mod pallet { /// in this error. Note that this usually shouldn't happen as deploying such contracts /// is rejected. NoChainExtension, + /// Failed to decode the XCM program. + XCMDecodeFailed, /// A contract with the same AccountId already exists. DuplicateContract, /// A contract self destructed in its constructor. 
diff --git a/substrate/frame/contracts/src/tests.rs b/substrate/frame/contracts/src/tests.rs index e7784b02b74..76fd012852a 100644 --- a/substrate/frame/contracts/src/tests.rs +++ b/substrate/frame/contracts/src/tests.rs @@ -486,6 +486,7 @@ impl Config for Test { type MaxDelegateDependencies = MaxDelegateDependencies; type Debug = TestDebug; type Environment = (); + type Xcm = (); } pub const ALICE: AccountId32 = AccountId32::new([1u8; 32]); diff --git a/substrate/frame/contracts/src/wasm/runtime.rs b/substrate/frame/contracts/src/wasm/runtime.rs index 4fd52b471a0..b3013adb790 100644 --- a/substrate/frame/contracts/src/wasm/runtime.rs +++ b/substrate/frame/contracts/src/wasm/runtime.rs @@ -23,10 +23,16 @@ use crate::{ schedule::HostFnWeights, BalanceOf, CodeHash, Config, DebugBufferVec, Error, SENTINEL, }; - use bitflags::bitflags; use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen}; -use frame_support::{ensure, traits::Get, weights::Weight}; +use frame_support::{ + dispatch::DispatchInfo, + ensure, + pallet_prelude::{DispatchResult, DispatchResultWithPostInfo}, + parameter_types, + traits::Get, + weights::Weight, +}; use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags}; use pallet_contracts_proc_macro::define_env; use sp_io::hashing::{blake2_128, blake2_256, keccak_256, sha2_256}; @@ -36,6 +42,9 @@ use sp_runtime::{ }; use sp_std::{fmt, prelude::*}; use wasmi::{core::HostError, errors::LinkerError, Linker, Memory, Store}; +use xcm::VersionedXcm; + +type CallOf = ::RuntimeCall; /// The maximum nesting depth a contract can use when encoding types. const MAX_DECODE_NESTING: u32 = 256; @@ -113,6 +122,17 @@ pub enum ReturnCode { EcdsaRecoverFailed = 11, /// sr25519 signature verification failed. Sr25519VerifyFailed = 12, + /// The `xcm_execute` call failed. + XcmExecutionFailed = 13, + /// The `xcm_send` call failed. + XcmSendFailed = 14, +} + +parameter_types! { + /// Getter types used by [`crate::api_doc::Current::call_runtime`] + const CallRuntimeFailed: ReturnCode = ReturnCode::CallRuntimeFailed; + /// Getter types used by [`crate::api_doc::Current::xcm_execute`] + const XcmExecutionFailed: ReturnCode = ReturnCode::XcmExecutionFailed; } impl From for ReturnCode { @@ -461,6 +481,29 @@ fn already_charged(_: u32) -> Option { None } +/// Ensure that the XCM program is executable, by checking that it does not contain any [`Transact`] +/// instruction with a call that is not allowed by the CallFilter. +fn ensure_executable(message: &VersionedXcm>) -> DispatchResult { + use frame_support::traits::Contains; + use xcm::prelude::{Transact, Xcm}; + + let mut message: Xcm> = + message.clone().try_into().map_err(|_| Error::::XCMDecodeFailed)?; + + message.iter_mut().try_for_each(|inst| -> DispatchResult { + let Transact { ref mut call, .. } = inst else { return Ok(()) }; + let call = call.ensure_decoded().map_err(|_| Error::::XCMDecodeFailed)?; + + if !::CallFilter::contains(call) { + return Err(frame_system::Error::::CallFiltered.into()) + } + + Ok(()) + })?; + + Ok(()) +} + /// Can only be used for one call. pub struct Runtime<'a, E: Ext + 'a> { ext: &'a mut E, @@ -558,6 +601,32 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { self.ext.gas_meter_mut().adjust_gas(charged, token); } + /// Charge, Run and adjust gas, for executing the given dispatchable. 
+ fn call_dispatchable< + ErrorReturnCode: Get, + F: FnOnce(&mut Self) -> DispatchResultWithPostInfo, + >( + &mut self, + dispatch_info: DispatchInfo, + run: F, + ) -> Result { + use frame_support::dispatch::extract_actual_weight; + let charged = self.charge_gas(RuntimeCosts::CallRuntime(dispatch_info.weight))?; + let result = run(self); + let actual_weight = extract_actual_weight(&result, &dispatch_info); + self.adjust_gas(charged, RuntimeCosts::CallRuntime(actual_weight)); + match result { + Ok(_) => Ok(ReturnCode::Success), + Err(e) => { + if self.ext.append_debug_buffer("") { + self.ext.append_debug_buffer("call failed with: "); + self.ext.append_debug_buffer(e.into()); + }; + Ok(ErrorReturnCode::get()) + }, + } + } + /// Read designated chunk from the sandbox memory. /// /// Returns `Err` if one of the following conditions occurs: @@ -633,8 +702,10 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { let ptr = ptr as usize; let mut bound_checked = memory.get(ptr..ptr + len as usize).ok_or_else(|| Error::::OutOfBounds)?; + let decoded = D::decode_all_with_depth_limit(MAX_DECODE_NESTING, &mut bound_checked) .map_err(|_| DispatchError::from(Error::::DecodingFailed))?; + Ok(decoded) } @@ -1023,6 +1094,7 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { // for every function. #[define_env(doc)] pub mod env { + /// Set the value at the given key in the contract storage. /// /// Equivalent to the newer [`seal1`][`super::api_doc::Version1::set_storage`] version with the @@ -2584,7 +2656,7 @@ pub mod env { /// # Return Value /// /// Returns `ReturnCode::Success` when the dispatchable was successfully executed and - /// returned `Ok`. When the dispatchable was exeuted but returned an error + /// returned `Ok`. When the dispatchable was executed but returned an error /// `ReturnCode::CallRuntimeFailed` is returned. The full error is not /// provided because it is not guaranteed to be stable. /// @@ -2605,23 +2677,118 @@ pub mod env { call_ptr: u32, call_len: u32, ) -> Result { - use frame_support::dispatch::{extract_actual_weight, GetDispatchInfo}; + use frame_support::dispatch::GetDispatchInfo; ctx.charge_gas(RuntimeCosts::CopyFromContract(call_len))?; let call: ::RuntimeCall = ctx.read_sandbox_memory_as_unbounded(memory, call_ptr, call_len)?; - let dispatch_info = call.get_dispatch_info(); - let charged = ctx.charge_gas(RuntimeCosts::CallRuntime(dispatch_info.weight))?; - let result = ctx.ext.call_runtime(call); - let actual_weight = extract_actual_weight(&result, &dispatch_info); - ctx.adjust_gas(charged, RuntimeCosts::CallRuntime(actual_weight)); - match result { - Ok(_) => Ok(ReturnCode::Success), + ctx.call_dispatchable::(call.get_dispatch_info(), |ctx| { + ctx.ext.call_runtime(call) + }) + } + + /// Execute an XCM program locally, using the contract's address as the origin. + /// This is equivalent to dispatching `pallet_xcm::execute` through call_runtime, except that + /// the function is called directly instead of being dispatched. + /// + /// # Parameters + /// + /// - `msg_ptr`: the pointer into the linear memory where the [`xcm::prelude::VersionedXcm`] is + /// placed. + /// - `msg_len`: the length of the message in bytes. + /// - `output_ptr`: the pointer into the linear memory where the [`xcm::prelude::Outcome`] + /// message id is placed. + /// + /// # Return Value + /// + /// Returns `ReturnCode::Success` when the XCM was successfully executed. When the XCM + /// execution fails, `ReturnCode::XcmExecutionFailed` is returned. 
+ #[unstable] + fn xcm_execute( + ctx: _, + memory: _, + msg_ptr: u32, + msg_len: u32, + output_ptr: u32, + ) -> Result { + use frame_support::dispatch::DispatchInfo; + use xcm::VersionedXcm; + use xcm_builder::{ExecuteController, ExecuteControllerWeightInfo}; + + ctx.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?; + let message: VersionedXcm> = + ctx.read_sandbox_memory_as_unbounded(memory, msg_ptr, msg_len)?; + + let execute_weight = + <::Xcm as ExecuteController<_, _>>::WeightInfo::execute(); + let weight = ctx.ext.gas_meter().gas_left().max(execute_weight); + let dispatch_info = DispatchInfo { weight, ..Default::default() }; + + ensure_executable::(&message)?; + ctx.call_dispatchable::(dispatch_info, |ctx| { + let origin = crate::RawOrigin::Signed(ctx.ext.address().clone()).into(); + let outcome = <::Xcm>::execute( + origin, + Box::new(message), + weight.saturating_sub(execute_weight), + )?; + + ctx.write_sandbox_memory(memory, output_ptr, &outcome.encode())?; + let pre_dispatch_weight = + <::Xcm as ExecuteController<_, _>>::WeightInfo::execute(); + Ok(Some(outcome.weight_used().saturating_add(pre_dispatch_weight)).into()) + }) + } + + /// Send an XCM program from the contract to the specified destination. + /// This is equivalent to dispatching `pallet_xcm::send` through `call_runtime`, except that + /// the function is called directly instead of being dispatched. + /// + /// # Parameters + /// + /// - `dest_ptr`: the pointer into the linear memory where the + /// [`xcm::prelude::VersionedMultiLocation`] is placed. + /// - `msg_ptr`: the pointer into the linear memory where the [`xcm::prelude::VersionedXcm`] is + /// placed. + /// - `msg_len`: the length of the message in bytes. + /// - `output_ptr`: the pointer into the linear memory where the [`xcm::v3::XcmHash`] message id + /// is placed. + /// + /// # Return Value + /// + /// Returns `ReturnCode::Success` when the message was successfully sent. When the XCM + /// execution fails, `ReturnCode::CallRuntimeFailed` is returned. + #[unstable] + fn xcm_send( + ctx: _, + memory: _, + dest_ptr: u32, + msg_ptr: u32, + msg_len: u32, + output_ptr: u32, + ) -> Result { + use xcm::{VersionedMultiLocation, VersionedXcm}; + use xcm_builder::{SendController, SendControllerWeightInfo}; + + ctx.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?; + let dest: VersionedMultiLocation = ctx.read_sandbox_memory_as(memory, dest_ptr)?; + + let message: VersionedXcm<()> = + ctx.read_sandbox_memory_as_unbounded(memory, msg_ptr, msg_len)?; + let weight = <::Xcm as SendController<_>>::WeightInfo::send(); + ctx.charge_gas(RuntimeCosts::CallRuntime(weight))?; + let origin = crate::RawOrigin::Signed(ctx.ext.address().clone()).into(); + + match <::Xcm>::send(origin, dest.into(), message.into()) { + Ok(message_id) => { + ctx.write_sandbox_memory(memory, output_ptr, &message_id.encode())?; + Ok(ReturnCode::Success) + }, Err(e) => { if ctx.ext.append_debug_buffer("") { - ctx.ext.append_debug_buffer("seal0::call_runtime failed with: "); + ctx.ext.append_debug_buffer("seal0::xcm_send failed with: "); ctx.ext.append_debug_buffer(e.into()); }; - Ok(ReturnCode::CallRuntimeFailed) + Ok(ReturnCode::XcmSendFailed) }, } } -- GitLab From f53604362c98d596d829f37dbdf87fecbd05b7a5 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Wed, 15 Nov 2023 10:23:59 +0200 Subject: [PATCH 32/74] pallet-xcm: use XcmTeleportFilter for teleported fees in reserve transfers (#2322) Disallow reserve transfers that use teleportable fees if `(origin, fees)` matches `XcmTeleportFilter`. 
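Example of wiring up such a filter (a minimal sketch adapted from the mock runtime added in this patch; the para id `2010` and the fungible amount are illustrative values, not part of the pallet API):

```rust
use frame_support::{parameter_types, traits::{Contains, EverythingBut}};
use xcm::latest::prelude::*;

parameter_types! {
	// The teleportable asset this runtime wants to keep out of fee payment.
	pub const FilteredTeleportAsset: MultiAsset = MultiAsset {
		fun: Fungible(10),
		id: Concrete(MultiLocation { parents: 0, interior: X1(Parachain(2010)) }),
	};
}

/// Matches any `(origin, assets)` pair whose assets include the filtered asset.
pub struct XcmTeleportFiltered;
impl Contains<(MultiLocation, Vec<MultiAsset>)> for XcmTeleportFiltered {
	fn contains(t: &(MultiLocation, Vec<MultiAsset>)) -> bool {
		let filtered = FilteredTeleportAsset::get();
		t.1.iter().any(|asset| asset == &filtered)
	}
}

// In the runtime's `pallet_xcm::Config`:
// type XcmTeleportFilter = EverythingBut<XcmTeleportFiltered>;
```

With that configuration, `(limited_)reserve_transfer_assets` using the filtered asset for fees now fails with `Error::<T>::Filtered`, matching the behaviour `(limited_)teleport_assets` already has.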
Add regression tests for filtering based on `XcmTeleportFilter` for both `(limited_)reserve_transfer_assets()` and `(limited_)teleport_assets` extrinsics. --- polkadot/xcm/pallet-xcm/src/lib.rs | 6 ++- polkadot/xcm/pallet-xcm/src/mock.rs | 29 +++++++++- .../pallet-xcm/src/tests/assets_transfer.rs | 54 +++++++++++++++++++ 3 files changed, 86 insertions(+), 3 deletions(-) diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index 8157620465f..38ea7555fc3 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -1379,7 +1379,7 @@ impl Pallet { TransferType::DestinationReserve => Self::destination_reserve_fees_instructions(dest, fees, weight_limit)?, TransferType::Teleport => - Self::teleport_fees_instructions(dest, fees, weight_limit)?, + Self::teleport_fees_instructions(origin_location, dest, fees, weight_limit)?, TransferType::RemoteReserve(_) => return Err(Error::::InvalidAssetUnsupportedReserve.into()), }); @@ -1715,10 +1715,14 @@ impl Pallet { } fn teleport_fees_instructions( + origin: MultiLocation, dest: MultiLocation, fees: MultiAsset, weight_limit: WeightLimit, ) -> Result<(Xcm<::RuntimeCall>, Xcm<()>), Error> { + let value = (origin, vec![fees.clone()]); + ensure!(T::XcmTeleportFilter::contains(&value), Error::::Filtered); + let context = T::UniversalLocation::get(); let reanchored_fees = fees .clone() diff --git a/polkadot/xcm/pallet-xcm/src/mock.rs b/polkadot/xcm/pallet-xcm/src/mock.rs index 026838993f1..e744cefb162 100644 --- a/polkadot/xcm/pallet-xcm/src/mock.rs +++ b/polkadot/xcm/pallet-xcm/src/mock.rs @@ -18,7 +18,8 @@ use codec::Encode; use frame_support::{ construct_runtime, match_types, parameter_types, traits::{ - AsEnsureOriginWithArg, ConstU128, ConstU32, Equals, Everything, EverythingBut, Nothing, + AsEnsureOriginWithArg, ConstU128, ConstU32, Contains, Equals, Everything, EverythingBut, + Nothing, }, weights::Weight, }; @@ -341,6 +342,9 @@ pub const USDT_PARA_ID: u32 = 2003; // This child parachain is not configured as trusted reserve or teleport location for any assets. pub const OTHER_PARA_ID: u32 = 2009; +// This child parachain is used for filtered/disallowed assets. +pub const FILTERED_PARA_ID: u32 = 2010; + parameter_types! { pub const RelayLocation: MultiLocation = Here.into_location(); pub const NativeAsset: MultiAsset = MultiAsset { @@ -384,6 +388,17 @@ parameter_types! { interior: X1(Parachain(USDT_PARA_ID)), }), }; + pub const FilteredTeleportLocation: MultiLocation = MultiLocation { + parents: 0, + interior: X1(Parachain(FILTERED_PARA_ID)) + }; + pub const FilteredTeleportAsset: MultiAsset = MultiAsset { + fun: Fungible(10), + id: Concrete(MultiLocation { + parents: 0, + interior: X1(Parachain(FILTERED_PARA_ID)), + }), + }; pub const AnyNetwork: Option = None; pub UniversalLocation: InteriorMultiLocation = Here; pub UnitWeightCost: u64 = 1_000; @@ -430,6 +445,7 @@ parameter_types! 
{ pub TrustedLocal: (MultiAssetFilter, MultiLocation) = (All.into(), Here.into()); pub TrustedSystemPara: (MultiAssetFilter, MultiLocation) = (NativeAsset::get().into(), SystemParachainLocation::get()); pub TrustedUsdt: (MultiAssetFilter, MultiLocation) = (Usdt::get().into(), UsdtTeleportLocation::get()); + pub TrustedFilteredTeleport: (MultiAssetFilter, MultiLocation) = (FilteredTeleportAsset::get().into(), FilteredTeleportLocation::get()); pub TeleportUsdtToForeign: (MultiAssetFilter, MultiLocation) = (Usdt::get().into(), ForeignReserveLocation::get()); pub TrustedForeign: (MultiAssetFilter, MultiLocation) = (ForeignAsset::get().into(), ForeignReserveLocation::get()); pub TrustedUsdc: (MultiAssetFilter, MultiLocation) = (Usdc::get().into(), UsdcReserveLocation::get()); @@ -466,6 +482,7 @@ impl xcm_executor::Config for XcmConfig { Case, Case, Case, + Case, ); type UniversalLocation = UniversalLocation; type Barrier = Barrier; @@ -496,6 +513,14 @@ parameter_types! { pub static AdvertisedXcmVersion: pallet_xcm::XcmVersion = 3; } +pub struct XcmTeleportFiltered; +impl Contains<(MultiLocation, Vec)> for XcmTeleportFiltered { + fn contains(t: &(MultiLocation, Vec)) -> bool { + let filtered = FilteredTeleportAsset::get(); + t.1.iter().any(|asset| asset == &filtered) + } +} + impl pallet_xcm::Config for Test { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; @@ -503,7 +528,7 @@ impl pallet_xcm::Config for Test { type ExecuteXcmOrigin = xcm_builder::EnsureXcmOrigin; type XcmExecuteFilter = Everything; type XcmExecutor = XcmExecutor; - type XcmTeleportFilter = Everything; + type XcmTeleportFilter = EverythingBut; type XcmReserveTransferFilter = Everything; type Weigher = FixedWeightBounds; type UniversalLocation = UniversalLocation; diff --git a/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs index b02b0fd33c3..bf39e1ca288 100644 --- a/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs +++ b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs @@ -117,6 +117,30 @@ fn limited_teleport_assets_works() { ); } +/// `limited_teleport_assets` should fail for filtered assets +#[test] +fn limited_teleport_filtered_assets_disallowed() { + let beneficiary: MultiLocation = AccountId32 { network: None, id: BOB.into() }.into(); + new_test_ext_with_balances(vec![(ALICE, INITIAL_BALANCE)]).execute_with(|| { + let result = XcmPallet::limited_teleport_assets( + RuntimeOrigin::signed(ALICE), + Box::new(FilteredTeleportLocation::get().into()), + Box::new(beneficiary.into()), + Box::new(FilteredTeleportAsset::get().into()), + 0, + Unlimited, + ); + assert_eq!( + result, + Err(DispatchError::Module(ModuleError { + index: 4, + error: [2, 0, 0, 0], + message: Some("Filtered") + })) + ); + }); +} + /// Test `reserve_transfer_assets_with_paid_router_works` /// /// Asserts that the sender's balance is decreased and the beneficiary's balance @@ -1403,3 +1427,33 @@ fn reserve_transfer_assets_with_teleportable_asset_fails() { assert_eq!(Assets::active_issuance(usdt_id_multilocation), usdt_initial_local_amount); }); } + +/// Test `reserve_transfer_assets` with teleportable fee that is filtered - should fail. 
+#[test] +fn reserve_transfer_assets_with_filtered_teleported_fee_disallowed() { + let beneficiary: MultiLocation = AccountId32 { network: None, id: BOB.into() }.into(); + new_test_ext_with_balances(vec![(ALICE, INITIAL_BALANCE)]).execute_with(|| { + let (assets, fee_index, _, _) = into_multiassets_checked( + // FilteredTeleportAsset for fees - teleportable but filtered + FilteredTeleportAsset::get().into(), + // native asset to transfer (not used for fees) - local reserve + (MultiLocation::here(), SEND_AMOUNT).into(), + ); + let result = XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(FilteredTeleportLocation::get().into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + ); + assert_eq!( + result, + Err(DispatchError::Module(ModuleError { + index: 4, + error: [2, 0, 0, 0], + message: Some("Filtered") + })) + ); + }); +} -- GitLab From 18165ebba88d41b4ba314685e5468ec0e809d799 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Wed, 15 Nov 2023 10:33:58 +0200 Subject: [PATCH 33/74] Unify `ChainSync` actions under one enum (follow-up) (#2317) Get rid of public `ChainSync::..._requests()` functions and return all requests as actions. --------- Co-authored-by: Sebastian Kunert --- .../client/network/sync/src/chain_sync.rs | 50 +++++++++++++---- substrate/client/network/sync/src/engine.rs | 55 +++++++++---------- substrate/client/network/sync/src/lib.rs | 2 +- .../client/network/sync/src/service/mod.rs | 4 +- .../{chain_sync.rs => syncing_service.rs} | 6 +- substrate/client/network/sync/src/warp.rs | 2 +- substrate/client/network/test/src/lib.rs | 2 +- 7 files changed, 74 insertions(+), 47 deletions(-) rename substrate/client/network/sync/src/service/{chain_sync.rs => syncing_service.rs} (98%) diff --git a/substrate/client/network/sync/src/chain_sync.rs b/substrate/client/network/sync/src/chain_sync.rs index 2adc6d42341..3825cfa33f7 100644 --- a/substrate/client/network/sync/src/chain_sync.rs +++ b/substrate/client/network/sync/src/chain_sync.rs @@ -191,6 +191,10 @@ pub enum ChainSyncAction { SendBlockRequest { peer_id: PeerId, request: BlockRequest }, /// Drop stale block request. CancelBlockRequest { peer_id: PeerId }, + /// Send state request to peer. + SendStateRequest { peer_id: PeerId, request: OpaqueStateRequest }, + /// Send warp proof request to peer. + SendWarpProofRequest { peer_id: PeerId, request: WarpProofRequest }, /// Peer misbehaved. Disconnect, report it and cancel the block request to it. DropPeer(BadPeer), /// Import blocks. @@ -1420,11 +1424,6 @@ where .any(|(_, p)| p.state == PeerSyncState::DownloadingStale(*hash)) } - /// Check if the peer is known to the sync state machine. Used for sanity checks. - pub fn is_peer_known(&self, peer_id: &PeerId) -> bool { - self.peers.contains_key(peer_id) - } - /// Get the set of downloaded blocks that are ready to be queued for import. fn ready_blocks(&mut self) -> Vec> { self.blocks @@ -1537,7 +1536,7 @@ where } /// Get justification requests scheduled by sync to be sent out. - pub fn justification_requests(&mut self) -> Vec<(PeerId, BlockRequest)> { + fn justification_requests(&mut self) -> Vec<(PeerId, BlockRequest)> { let peers = &mut self.peers; let mut matcher = self.extra_justifications.matcher(); std::iter::from_fn(move || { @@ -1564,7 +1563,7 @@ where } /// Get block requests scheduled by sync to be sent out. 
- pub fn block_requests(&mut self) -> Vec<(PeerId, BlockRequest)> { + fn block_requests(&mut self) -> Vec<(PeerId, BlockRequest)> { if self.mode == SyncMode::Warp { return self .warp_target_block_request() @@ -1691,7 +1690,7 @@ where } /// Get a state request scheduled by sync to be sent out (if any). - pub fn state_request(&mut self) -> Option<(PeerId, OpaqueStateRequest)> { + fn state_request(&mut self) -> Option<(PeerId, OpaqueStateRequest)> { if self.allowed_requests.is_empty() { return None } @@ -1737,7 +1736,7 @@ where } /// Get a warp proof request scheduled by sync to be sent out (if any). - pub fn warp_sync_request(&mut self) -> Option<(PeerId, WarpProofRequest)> { + fn warp_sync_request(&mut self) -> Option<(PeerId, WarpProofRequest)> { if let Some(sync) = &self.warp_sync { if self.allowed_requests.is_empty() || sync.is_complete() || @@ -2025,7 +2024,38 @@ where /// Get pending actions to perform. #[must_use] - pub fn take_actions(&mut self) -> impl Iterator> { + pub fn actions(&mut self) -> impl Iterator> { + let block_requests = self + .block_requests() + .into_iter() + .map(|(peer_id, request)| ChainSyncAction::SendBlockRequest { peer_id, request }); + self.actions.extend(block_requests); + + let justification_requests = self + .justification_requests() + .into_iter() + .map(|(peer_id, request)| ChainSyncAction::SendBlockRequest { peer_id, request }); + self.actions.extend(justification_requests); + + let state_request = self + .state_request() + .into_iter() + .map(|(peer_id, request)| ChainSyncAction::SendStateRequest { peer_id, request }); + self.actions.extend(state_request); + + let warp_proof_request = self + .warp_sync_request() + .into_iter() + .map(|(peer_id, request)| ChainSyncAction::SendWarpProofRequest { peer_id, request }); + self.actions.extend(warp_proof_request); + + std::mem::take(&mut self.actions).into_iter() + } + + /// A version of `actions()` that doesn't schedule extra requests. For testing only. + #[cfg(test)] + #[must_use] + fn take_actions(&mut self) -> impl Iterator> { std::mem::take(&mut self.actions).into_iter() } } diff --git a/substrate/client/network/sync/src/engine.rs b/substrate/client/network/sync/src/engine.rs index 58a9fdc49f2..2cb8eab22f7 100644 --- a/substrate/client/network/sync/src/engine.rs +++ b/substrate/client/network/sync/src/engine.rs @@ -30,7 +30,7 @@ use crate::{ schema::v1::{StateRequest, StateResponse}, service::{ self, - chain_sync::{SyncingService, ToServiceCommand}, + syncing_service::{SyncingService, ToServiceCommand}, }, types::{ BadPeer, ExtendedPeerInfo, OpaqueStateRequest, OpaqueStateResponse, PeerRequest, SyncEvent, @@ -713,16 +713,13 @@ where self.is_major_syncing .store(self.chain_sync.status().state.is_major_syncing(), Ordering::Relaxed); - // Process actions requested by `ChainSync` during `select!`. + // Process actions requested by `ChainSync`. self.process_chain_sync_actions(); - - // Send outbound requests on `ChanSync`'s behalf. - self.send_chain_sync_requests(); } } fn process_chain_sync_actions(&mut self) { - self.chain_sync.take_actions().for_each(|action| match action { + self.chain_sync.actions().for_each(|action| match action { ChainSyncAction::SendBlockRequest { peer_id, request } => { // Sending block request implies dropping obsolete pending response as we are not // interested in it anymore (see [`ChainSyncAction::SendBlockRequest`]). 
@@ -741,7 +738,25 @@ where ChainSyncAction::CancelBlockRequest { peer_id } => { let removed = self.pending_responses.remove(&peer_id); - trace!(target: LOG_TARGET, "Processed {action:?}., response removed: {removed}."); + trace!(target: LOG_TARGET, "Processed {action:?}, response removed: {removed}."); + }, + ChainSyncAction::SendStateRequest { peer_id, request } => { + self.send_state_request(peer_id, request); + + trace!( + target: LOG_TARGET, + "Processed `ChainSyncAction::SendBlockRequest` to {peer_id}.", + ); + }, + ChainSyncAction::SendWarpProofRequest { peer_id, request } => { + self.send_warp_proof_request(peer_id, request.clone()); + + trace!( + target: LOG_TARGET, + "Processed `ChainSyncAction::SendWarpProofRequest` to {}, request: {:?}.", + peer_id, + request, + ); }, ChainSyncAction::DropPeer(BadPeer(peer_id, rep)) => { self.pending_responses.remove(&peer_id); @@ -1104,26 +1119,8 @@ where Ok(()) } - fn send_chain_sync_requests(&mut self) { - for (peer_id, request) in self.chain_sync.block_requests() { - self.send_block_request(peer_id, request); - } - - if let Some((peer_id, request)) = self.chain_sync.state_request() { - self.send_state_request(peer_id, request); - } - - for (peer_id, request) in self.chain_sync.justification_requests() { - self.send_block_request(peer_id, request); - } - - if let Some((peer_id, request)) = self.chain_sync.warp_sync_request() { - self.send_warp_sync_request(peer_id, request); - } - } - fn send_block_request(&mut self, peer_id: PeerId, request: BlockRequest) { - if !self.chain_sync.is_peer_known(&peer_id) { + if !self.peers.contains_key(&peer_id) { trace!(target: LOG_TARGET, "Cannot send block request to unknown peer {peer_id}"); debug_assert!(false); return @@ -1139,7 +1136,7 @@ where } fn send_state_request(&mut self, peer_id: PeerId, request: OpaqueStateRequest) { - if !self.chain_sync.is_peer_known(&peer_id) { + if !self.peers.contains_key(&peer_id) { trace!(target: LOG_TARGET, "Cannot send state request to unknown peer {peer_id}"); debug_assert!(false); return @@ -1168,8 +1165,8 @@ where } } - fn send_warp_sync_request(&mut self, peer_id: PeerId, request: WarpProofRequest) { - if !self.chain_sync.is_peer_known(&peer_id) { + fn send_warp_proof_request(&mut self, peer_id: PeerId, request: WarpProofRequest) { + if !self.peers.contains_key(&peer_id) { trace!(target: LOG_TARGET, "Cannot send warp proof request to unknown peer {peer_id}"); debug_assert!(false); return diff --git a/substrate/client/network/sync/src/lib.rs b/substrate/client/network/sync/src/lib.rs index c42b0601e65..1a7e773c95f 100644 --- a/substrate/client/network/sync/src/lib.rs +++ b/substrate/client/network/sync/src/lib.rs @@ -18,7 +18,7 @@ //! Blockchain syncing implementation in Substrate. -pub use service::chain_sync::SyncingService; +pub use service::syncing_service::SyncingService; pub use types::{SyncEvent, SyncEventStream, SyncState, SyncStatus, SyncStatusProvider}; mod block_announce_validator; diff --git a/substrate/client/network/sync/src/service/mod.rs b/substrate/client/network/sync/src/service/mod.rs index 18331d63ed2..d045af26e70 100644 --- a/substrate/client/network/sync/src/service/mod.rs +++ b/substrate/client/network/sync/src/service/mod.rs @@ -16,8 +16,8 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! `ChainSync`-related service code +//! 
`SyncingEngine`-related service code -pub mod chain_sync; pub mod mock; pub mod network; +pub mod syncing_service; diff --git a/substrate/client/network/sync/src/service/chain_sync.rs b/substrate/client/network/sync/src/service/syncing_service.rs similarity index 98% rename from substrate/client/network/sync/src/service/chain_sync.rs rename to substrate/client/network/sync/src/service/syncing_service.rs index 3d11880c511..92d649d65dc 100644 --- a/substrate/client/network/sync/src/service/chain_sync.rs +++ b/substrate/client/network/sync/src/service/syncing_service.rs @@ -34,7 +34,7 @@ use std::{ }, }; -/// Commands send to `ChainSync` +/// Commands send to `SyncingEngine` pub enum ToServiceCommand { SetSyncForkRequest(Vec, B::Hash, NumberFor), RequestJustification(B::Hash, NumberFor), @@ -63,7 +63,7 @@ pub enum ToServiceCommand { // }, } -/// Handle for communicating with `ChainSync` asynchronously +/// Handle for communicating with `SyncingEngine` asynchronously #[derive(Clone)] pub struct SyncingService { tx: TracingUnboundedSender>, @@ -148,7 +148,7 @@ impl SyncingService { /// Get sync status /// - /// Returns an error if `ChainSync` has terminated. + /// Returns an error if `SyncingEngine` has terminated. pub async fn status(&self) -> Result, ()> { let (tx, rx) = oneshot::channel(); let _ = self.tx.unbounded_send(ToServiceCommand::Status(tx)); diff --git a/substrate/client/network/sync/src/warp.rs b/substrate/client/network/sync/src/warp.rs index 2c0adc856c1..169b3de35aa 100644 --- a/substrate/client/network/sync/src/warp.rs +++ b/substrate/client/network/sync/src/warp.rs @@ -42,7 +42,7 @@ const LOG_TARGET: &'static str = "sync"; pub struct EncodedProof(pub Vec); /// Warp sync request -#[derive(Encode, Decode, Debug)] +#[derive(Encode, Decode, Debug, Clone)] pub struct WarpProofRequest { /// Start collecting proofs from this block. pub begin: B::Hash, diff --git a/substrate/client/network/test/src/lib.rs b/substrate/client/network/test/src/lib.rs index f869e3a171a..cfc3cb7af3f 100644 --- a/substrate/client/network/test/src/lib.rs +++ b/substrate/client/network/test/src/lib.rs @@ -64,7 +64,7 @@ use sc_network_common::role::Roles; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; use sc_network_sync::{ block_request_handler::BlockRequestHandler, - service::{chain_sync::SyncingService, network::NetworkServiceProvider}, + service::{network::NetworkServiceProvider, syncing_service::SyncingService}, state_request_handler::StateRequestHandler, warp::{ AuthorityList, EncodedProof, SetId, VerificationResult, WarpSyncParams, WarpSyncProvider, -- GitLab From 3ab22fc7c732b537b6a2d153b5cbedcc4396ac7a Mon Sep 17 00:00:00 2001 From: eskimor Date: Wed, 15 Nov 2023 12:37:59 +0100 Subject: [PATCH 34/74] Versioned assigner. Also fixed passed on core numbers. Bulk needs offset subtracted. --- .../runtime/parachains/src/assigner/mod.rs | 25 ++++++ .../src/{assigner.rs => assigner/v1.rs} | 77 +++++++++---------- 2 files changed, 61 insertions(+), 41 deletions(-) create mode 100644 polkadot/runtime/parachains/src/assigner/mod.rs rename polkadot/runtime/parachains/src/{assigner.rs => assigner/v1.rs} (74%) diff --git a/polkadot/runtime/parachains/src/assigner/mod.rs b/polkadot/runtime/parachains/src/assigner/mod.rs new file mode 100644 index 00000000000..f25820c192a --- /dev/null +++ b/polkadot/runtime/parachains/src/assigner/mod.rs @@ -0,0 +1,25 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! The Polkadot multiplexing assignment provider. +//! Provides blockspace assignments for both bulk and on demand parachains. + +/// Implementations are versioned. +/// +/// The provided opaque `Assignment` type might evolve. Whenever that type gets changed, we need to +/// create a new version and make sure runtimes do proper migrations of any storage storing +/// `Assignment`. +pub mod v1; diff --git a/polkadot/runtime/parachains/src/assigner.rs b/polkadot/runtime/parachains/src/assigner/v1.rs similarity index 74% rename from polkadot/runtime/parachains/src/assigner.rs rename to polkadot/runtime/parachains/src/assigner/v1.rs index eea4f828863..0c971edbcff 100644 --- a/polkadot/runtime/parachains/src/assigner.rs +++ b/polkadot/runtime/parachains/src/assigner/v1.rs @@ -30,8 +30,8 @@ use primitives::{CoreIndex, Id as ParaId}; use crate::{ assigner_bulk, assigner_parachains as assigner_legacy, configuration, paras, scheduler::common::{ - Assignment, AssignmentProvider, AssignmentProviderConfig, AssignmentVersion, - FixedAssignmentProvider, V0Assignment, + Assignment, AssignmentProvider, AssignmentProviderConfig, FixedAssignmentProvider, + V0Assignment, }, }; @@ -85,51 +85,21 @@ impl Assignment for UnifiedAssignment< } } -impl Pallet { - // Helper fn for the AssignmentProvider implementation. - // Assumes that the first allocation of cores is to bulk parachains. - // This function will return false if there are no cores assigned to the bulk parachain - // assigner. - fn is_legacy_core(core_idx: &CoreIndex) -> bool { - let parachain_cores = as FixedAssignmentProvider< - BlockNumberFor, - >>::session_core_count(); - core_idx.0 < parachain_cores - } -} - impl AssignmentProvider> for Pallet { type AssignmentType = UnifiedAssignmentType; - type OldAssignmentType = V0Assignment; - - // Sum of underlying versions ensures this version will always get increased on changes. 
- const ASSIGNMENT_STORAGE_VERSION: AssignmentVersion = - >::ASSIGNMENT_STORAGE_VERSION - .saturating_add(>::ASSIGNMENT_STORAGE_VERSION); - - fn migrate_old_to_current( - old: Self::OldAssignmentType, - core: CoreIndex, - ) -> Self::AssignmentType { - if Self::is_legacy_core(&core) { - UnifiedAssignment::LegacyAuction( as AssignmentProvider< - BlockNumberFor, - >>::migrate_old_to_current(old, core)) - } else { - UnifiedAssignment::Bulk( as AssignmentProvider< - BlockNumberFor, - >>::migrate_old_to_current(old, core)) - } - } - /// Pops an `Assignment` from a specified `CoreIndex` fn pop_assignment_for_core(core_idx: CoreIndex) -> Option { - if Pallet::::is_legacy_core(&core_idx) { + let legacy_cores = as FixedAssignmentProvider< + BlockNumberFor>>::session_core_count(); + + if core_idx.0 < legacy_cores { as AssignmentProvider>>::pop_assignment_for_core( core_idx, ).map(UnifiedAssignment::LegacyAuction) } else { + let core_idx = CoreIndex(core_idx.0 - legacy_cores); + as AssignmentProvider>>::pop_assignment_for_core( core_idx, ) @@ -164,11 +134,16 @@ impl AssignmentProvider> for Pallet { } fn get_provider_config(core_idx: CoreIndex) -> AssignmentProviderConfig> { - if Pallet::::is_legacy_core(&core_idx) { + let legacy_cores = as FixedAssignmentProvider< + BlockNumberFor>>::session_core_count(); + + if core_idx.0 < legacy_cores { as AssignmentProvider>>::get_provider_config( core_idx, ) } else { + let core_idx = CoreIndex(core_idx.0 - legacy_cores); + as AssignmentProvider>>::get_provider_config( core_idx, ) @@ -178,13 +153,33 @@ impl AssignmentProvider> for Pallet { impl FixedAssignmentProvider> for Pallet { fn session_core_count() -> u32 { - let parachain_cores = as FixedAssignmentProvider< + let legacy_cores = as FixedAssignmentProvider< BlockNumberFor, >>::session_core_count(); let bulk_cores = as FixedAssignmentProvider>>::session_core_count( ); - parachain_cores.saturating_add(bulk_cores) + legacy_cores.saturating_add(bulk_cores) + } +} + +pub fn migrate_assignment_v0_to_v1( + old: V0Assignment, + core: CoreIndex, +) -> UnifiedAssignmentType { + let legacy_cores = as FixedAssignmentProvider< + BlockNumberFor, + >>::session_core_count(); + + if core.0 < legacy_cores { + UnifiedAssignment::LegacyAuction(assigner_legacy::ParachainsAssignment::from_v0_assignment( + old, + )) + } else { + // We are not subtracting `legacy_cores` from `core` here, as this was not done before for on-demand. + // Therefore we keep it as is, so the book keeping will affect the correct core in the underlying on-demand + // assignment provider. + UnifiedAssignment::Bulk(assigner_bulk::BulkAssignment::from_v0_assignment(old, core)) } } -- GitLab From 7efec231c7a042ad592a891c8e6294a902ec71a5 Mon Sep 17 00:00:00 2001 From: eskimor Date: Wed, 15 Nov 2023 12:39:13 +0100 Subject: [PATCH 35/74] Sound migration story. 
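With this change the `AssignmentProvider` trait no longer carries `OldAssignmentType` / `ASSIGNMENT_STORAGE_VERSION`; a runtime that swaps out its provider instead supplies an `AssignmentMigration` implementation describing how persisted assignments convert, and registers the generic `MigrateAssignment` migration. A rough sketch of the expected wiring, abridged from the Rococo changes later in this series (imports elided; the unit struct is a simplification):

```rust
/// We are swapping out the assignment type in the scheduler for coretime.
pub struct SchedulerAssignmentMigration;
impl parachains_scheduler::migration::assignment_version::AssignmentMigration
    for SchedulerAssignmentMigration
{
    const ON_CHAIN_STORAGE_VERSION: AssignmentVersion = AssignmentVersion::new(0);
    const STORAGE_VERSION: AssignmentVersion = AssignmentVersion::new(1);

    type OldType = parachains_scheduler::common::V0Assignment;
    type NewType = parachains_assigner_v1::UnifiedAssignmentType<Runtime>;

    fn migrate(core_idx: CoreIndex, old: Self::OldType) -> Self::NewType {
        parachains_assigner_v1::migrate_assignment_v0_to_v1::<Runtime>(old, core_idx)
    }
}

// Registered alongside the other unreleased scheduler migrations:
// parachains_scheduler::migration::assignment_version::MigrateAssignment<
//     Runtime,
//     SchedulerAssignmentMigration,
// >,
```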
--- .../parachains/src/assigner_bulk/mod.rs | 29 +- .../parachains/src/assigner_on_demand/mod.rs | 21 +- .../parachains/src/assigner_parachains.rs | 15 +- .../parachains/src/scheduler/common.rs | 24 +- .../parachains/src/scheduler/migration.rs | 260 ++++++++++-------- 5 files changed, 170 insertions(+), 179 deletions(-) diff --git a/polkadot/runtime/parachains/src/assigner_bulk/mod.rs b/polkadot/runtime/parachains/src/assigner_bulk/mod.rs index 30bddc3046d..801762dd6e9 100644 --- a/polkadot/runtime/parachains/src/assigner_bulk/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_bulk/mod.rs @@ -31,8 +31,8 @@ mod tests; use crate::{ assigner_on_demand, configuration, paras, scheduler::common::{ - Assignment, AssignmentProvider, AssignmentProviderConfig, AssignmentVersion, - FixedAssignmentProvider, + Assignment, AssignmentProvider, AssignmentProviderConfig, FixedAssignmentProvider, + V0Assignment, }, }; @@ -256,24 +256,17 @@ impl Assignment for BulkAssignment { } } -impl AssignmentProvider> for Pallet { - type AssignmentType = BulkAssignmentType; - - type OldAssignmentType = - as AssignmentProvider>>::OldAssignmentType; - - const ASSIGNMENT_STORAGE_VERSION: AssignmentVersion = AssignmentVersion::new(0) - .saturating_add(>::ASSIGNMENT_STORAGE_VERSION); - - fn migrate_old_to_current( - old: Self::OldAssignmentType, - core: CoreIndex, - ) -> Self::AssignmentType { - // Previous version all assignments had been on-demand (bulk was not a thing): - BulkAssignment::Instantaneous(>::migrate_old_to_current( - old, core, +impl BulkAssignment { + pub(crate) fn from_v0_assignment(v0: V0Assignment, core_index: CoreIndex) -> Self { + // There have been no bulk cores previously: + BulkAssignment::Instantaneous(assigner_on_demand::OnDemandAssignment::from_v0_assignment( + v0, core_index, )) } +} + +impl AssignmentProvider> for Pallet { + type AssignmentType = BulkAssignmentType; fn pop_assignment_for_core(core_idx: CoreIndex) -> Option { let now = >::block_number(); diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs index 38abe465626..cd99681a380 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs @@ -34,9 +34,7 @@ mod tests; use crate::{ configuration, paras, - scheduler::common::{ - Assignment, AssignmentProvider, AssignmentProviderConfig, AssignmentVersion, V0Assignment, - }, + scheduler::common::{Assignment, AssignmentProvider, AssignmentProviderConfig, V0Assignment}, }; use frame_support::{ @@ -118,11 +116,15 @@ pub struct OnDemandAssignment { core_index: CoreIndex, } -#[cfg(test)] impl OnDemandAssignment { + #[cfg(test)] pub(crate) fn new(para_id: ParaId, core_index: CoreIndex) -> Self { Self { para_id, core_index } } + + pub(crate) fn from_v0_assignment(v0: V0Assignment, core_index: CoreIndex) -> Self { + Self { para_id: v0.para_id, core_index } + } } impl Assignment for OnDemandAssignment { @@ -569,17 +571,6 @@ where impl AssignmentProvider> for Pallet { type AssignmentType = OnDemandAssignment; - type OldAssignmentType = V0Assignment; - - const ASSIGNMENT_STORAGE_VERSION: AssignmentVersion = AssignmentVersion::new(1); - - fn migrate_old_to_current( - old: Self::OldAssignmentType, - core: CoreIndex, - ) -> Self::AssignmentType { - OnDemandAssignment { para_id: old.para_id, core_index: core } - } - /// Take the next queued entry that is available for a given core index. 
/// Invalidates and removes orders with a `para_id` that is not `ParaLifecycle::Parathread` /// but only in [0..P] range slice of the order queue, where P is the element that is diff --git a/polkadot/runtime/parachains/src/assigner_parachains.rs b/polkadot/runtime/parachains/src/assigner_parachains.rs index ef1b97df8d5..4f2fdd47b05 100644 --- a/polkadot/runtime/parachains/src/assigner_parachains.rs +++ b/polkadot/runtime/parachains/src/assigner_parachains.rs @@ -30,8 +30,8 @@ use sp_runtime::codec::{Decode, Encode}; use crate::{ configuration, paras, scheduler::common::{ - Assignment, AssignmentProvider, AssignmentProviderConfig, AssignmentVersion, - FixedAssignmentProvider, V0Assignment, + Assignment, AssignmentProvider, AssignmentProviderConfig, FixedAssignmentProvider, + V0Assignment, }, }; @@ -58,6 +58,10 @@ impl ParachainsAssignment { fn new(para_id: ParaId) -> Self { Self { para_id } } + + pub(crate) fn from_v0_assignment(v0: V0Assignment) -> Self { + Self { para_id: v0.para_id } + } } impl Assignment for ParachainsAssignment { @@ -68,13 +72,6 @@ impl Assignment for ParachainsAssignment { impl AssignmentProvider> for Pallet { type AssignmentType = ParachainsAssignment; - type OldAssignmentType = V0Assignment; - // Format has not changed for parachains, therefore still version 0. - const ASSIGNMENT_STORAGE_VERSION: AssignmentVersion = AssignmentVersion::new(0); - - fn migrate_old_to_current(old: Self::OldAssignmentType, _: CoreIndex) -> Self::AssignmentType { - ParachainsAssignment { para_id: old.para_id } - } fn pop_assignment_for_core(core_idx: CoreIndex) -> Option { >::parachains() diff --git a/polkadot/runtime/parachains/src/scheduler/common.rs b/polkadot/runtime/parachains/src/scheduler/common.rs index d811951306c..832c6cd2f8f 100644 --- a/polkadot/runtime/parachains/src/scheduler/common.rs +++ b/polkadot/runtime/parachains/src/scheduler/common.rs @@ -144,31 +144,9 @@ pub trait AssignmentProvider { /// functions. /// /// As the lifetime of an assignment might outlive the current process (and need persistence), - /// we provide this type in a versioned fashion. This is where `OldAssignmentType` below and - /// `ASSIGNMENT_STORAGE_VERSION` come into play. + /// make sure to migrate using code if you change the `AssignmentProvider` implementation. type AssignmentType: Assignment + Encode + Decode + TypeInfo + Debug; - /// Previous version of assignments. - /// - /// Useful for migrating persisted assignments to the new version. - type OldAssignmentType: Assignment + Encode + Decode + TypeInfo + Debug; - - /// What version the binary format of the `AssignmentType` has. - /// - /// Will be bumped whenver the storage format of `AssignmentType` changes. If this version - /// differs from the version persisted you need to decode `OldAssignmentType` and migrate to the - /// new one via `migrate_old_to_current`. - const ASSIGNMENT_STORAGE_VERSION: AssignmentVersion; - - /// Migrate an old Assignment to the current format. - /// - /// In addition to the old assignment the core this assignment has been scheduled to, needs to - /// be provided. - fn migrate_old_to_current( - old: Self::OldAssignmentType, - core: CoreIndex, - ) -> Self::AssignmentType; - /// Pops an [`Assignment`] from the provider for a specified [`CoreIndex`]. /// /// This is where assignments come into existance. 
diff --git a/polkadot/runtime/parachains/src/scheduler/migration.rs b/polkadot/runtime/parachains/src/scheduler/migration.rs index 0039a9e5315..0e3b5c6daae 100644 --- a/polkadot/runtime/parachains/src/scheduler/migration.rs +++ b/polkadot/runtime/parachains/src/scheduler/migration.rs @@ -29,32 +29,78 @@ pub mod assignment_version { use super::*; use crate::scheduler::{self, common::AssignmentVersion}; - pub struct MigrateAssignment(sp_std::marker::PhantomData); + pub trait AssignmentMigration { + const ON_CHAIN_STORAGE_VERSION: AssignmentVersion; + const STORAGE_VERSION: AssignmentVersion; - /// Previously used `ParasEntryType`. - pub type OldParasEntryType = ParasEntry, OldAssignmentType>; + type OldType: Encode + Decode + TypeInfo + 'static; + type NewType: Encode + Decode + TypeInfo + 'static; - /// Previously used assignment type: - pub(crate) type OldAssignmentType = <::AssignmentProvider as AssignmentProvider< - BlockNumberFor, - >>::OldAssignmentType; + fn migrate(core_idx: CoreIndex, old: Self::OldType) -> Self::NewType; + } - /// ClaimQueue using old assignments. - #[storage_alias] - pub(crate) type ClaimQueue = - StorageValue, BTreeMap>>, ValueQuery>; + mod old { + use super::*; + /// Previously used `ParasEntryType`. + pub type ParasEntryType = ParasEntry, AssignmentType>; + + /// Previously used assignment type: + pub(crate) type AssignmentType = ::OldType; + + /// ClaimQueue using old assignments. + #[storage_alias] + pub(crate) type ClaimQueue = StorageValue< + Pallet, + BTreeMap>>, + ValueQuery, + >; + + #[storage_alias] + pub(crate) type AvailabilityCores = + StorageValue, Vec>, ValueQuery>; + + /// Conveninece type alias for `CoreOccupied`. + pub type CoreOccupiedType = CoreOccupied, AssignmentType>; + } + + mod new { + use super::*; + /// Previously used `ParasEntryType`. + pub type ParasEntryType = ParasEntry, AssignmentType>; + + /// Now used assignment type: + pub(crate) type AssignmentType = ::NewType; + + /// ClaimQueue using new assignments. + #[storage_alias] + pub(crate) type ClaimQueue = StorageValue< + Pallet, + BTreeMap>>, + ValueQuery, + >; + + /// Avaialbility cores using new assignments. + #[storage_alias] + pub(crate) type AvailabilityCores = + StorageValue, Vec>, ValueQuery>; + + /// Conveninece type alias for `CoreOccupied`. + pub type CoreOccupiedType = CoreOccupied, AssignmentType>; + } - impl OnRuntimeUpgrade for MigrateAssignment { + pub struct MigrateAssignment( + sp_std::marker::PhantomData, + sp_std::marker::PhantomData, + ); + + impl OnRuntimeUpgrade for MigrateAssignment { fn on_runtime_upgrade() -> Weight { - let assignment_version = , - >>::ASSIGNMENT_STORAGE_VERSION; // Is a migration necessary? 
- if AssignmentVersion::get::>() < assignment_version { - let mut weight = migrate_assignments::(); + if AssignmentVersion::get::>() == M::ON_CHAIN_STORAGE_VERSION { + let mut weight = migrate_assignments::(); - log::info!(target: scheduler::LOG_TARGET, "Migrating para scheduler assginments to {:?}", assignment_version); - assignment_version.put::>(); + log::info!(target: scheduler::LOG_TARGET, "Migrating para scheduler assginments to {:?}", M::STORAGE_VERSION); + M::STORAGE_VERSION.put::>(); weight += T::DbWeight::get().reads_writes(1, 1); weight @@ -66,37 +112,71 @@ pub mod assignment_version { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, sp_runtime::DispatchError> { - log::trace!( - target: crate::scheduler::LOG_TARGET, - "ClaimQueue before migration: {}", - ClaimQueue::::get().len() - ); - - let bytes = u32::to_be_bytes(v1::ClaimQueue::::get().len() as u32); - - Ok(bytes.to_vec()) + Ok(AssignmentVersion::get::>().encode()) } #[cfg(feature = "try-runtime")] fn post_upgrade(state: Vec) -> Result<(), sp_runtime::DispatchError> { - let assignment_version = , - >>::ASSIGNMENT_STORAGE_VERSION; - log::trace!(target: crate::scheduler::LOG_TARGET, "Running post_upgrade()"); - ensure!( - AssignmentVersion::get::>() == assignment_version, - "Assignment version should should match current version after the migration" - ); - - let old_len = u32::from_be_bytes(state.try_into().unwrap()); - ensure!( - Pallet::::claimqueue_len() as u32 == old_len, - "Old ClaimQueue completely moved to new ClaimQueue after migration" - ); - + // Did migration take place? + if state.decode()? == M::ON_CHAIN_STORAGE_VERSION { + ensure!( + AssignmentVersion::get::>() == M::STORAGE_VERSION, + "Assignment version should should match current version after the migration." + ); + } Ok(()) } } + + pub fn migrate_assignments() -> Weight { + let mut weight: Weight = Weight::zero(); + + // Claimqueue migration: + let old = old::ClaimQueue::::take(); + + let new = old + .into_iter() + .map(|(core, v)| { + ( + core, + v.into_iter() + .map(|old| migrate_assignment_paras_entry::(core, old)) + .collect::>(), + ) + }) + .collect::>>>(); + new::ClaimQueue::::put(new); + + weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 1)); + + // Availability cores migration: + let old = old::AvailabilityCores::::take(); + + let new = old + .into_iter() + .enumerate() + .map(|(i, o)| match o { + CoreOccupied::Free => CoreOccupied::Free, + CoreOccupied::Paras(entry) => CoreOccupied::Paras( + migrate_assignment_paras_entry::(CoreIndex(i as _), entry), + ), + }) + .collect::>>(); + new::AvailabilityCores::::put(new); + + weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 1)); + + weight + } + + fn migrate_assignment_paras_entry( + core: CoreIndex, + old: old::ParasEntryType, + ) -> new::ParasEntryType { + let super::ParasEntry { assignment, availability_timeouts, ttl } = old; + + super::ParasEntry { assignment: M::migrate(core, assignment), availability_timeouts, ttl } + } } /// Old scheduler with explicit parathreads and `Scheduled` storage instead of `ClaimQueue`. 
@@ -255,8 +335,8 @@ pub mod v1 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, sp_runtime::DispatchError> { - let n: u32 = v0::Scheduled::::get().len() as u32 + - v0::AvailabilityCores::::get().iter().filter(|c| c.is_some()).count() as u32; + let n: u32 = v1::ClaimQueue::::get().len() as u32 + + v1::AvailabilityCores::::get().iter().filter(|c| c.is_some()).count() as u32; log::info!( target: scheduler::LOG_TARGET, @@ -270,11 +350,6 @@ pub mod v1 { fn post_upgrade(state: Vec) -> Result<(), sp_runtime::DispatchError> { log::info!(target: crate::scheduler::LOG_TARGET, "Running post_upgrade()"); - ensure!( - v0::Scheduled::::get().is_empty(), - "Scheduled should be empty after the migration" - ); - let expected_len = u32::decode(&mut &state[..]).unwrap(); let availability_cores_waiting = super::AvailabilityCores::::get() .iter() @@ -295,7 +370,9 @@ pub mod v1 { pub mod v2 { use super::*; use crate::scheduler; - use frame_support::traits::StorageVersion; + + // ParasEntry unchanged: + pub type ParasEntry = super::v1::ParasEntry; // V2 (no Option wrapper), but still old Assignment format. // @@ -303,26 +380,28 @@ pub mod v2 { #[storage_alias] pub(crate) type ClaimQueue = StorageValue< Pallet, - BTreeMap>>>, + BTreeMap>>>, ValueQuery, >; - pub struct MigrateToV2(sp_std::marker::PhantomData); + #[allow(deprecated)] + pub type MigrateToV2 = VersionedMigration< + 1, + 2, + UncheckedMigrateToV2, + Pallet, + ::DbWeight, + >; - impl OnRuntimeUpgrade for MigrateToV2 { + pub struct UncheckedMigrateToV2(sp_std::marker::PhantomData); + + impl OnRuntimeUpgrade for UncheckedMigrateToV2 { fn on_runtime_upgrade() -> Weight { - if StorageVersion::get::>() == 1 { - let mut weight_consumed = migrate_to_v2::(); + let weight_consumed = migrate_to_v2::(); - log::info!(target: scheduler::LOG_TARGET, "Migrating para scheduler storage to v2"); - StorageVersion::new(2).put::>(); + log::info!(target: scheduler::LOG_TARGET, "Migrating para scheduler storage to v2"); - weight_consumed += T::DbWeight::get().reads_writes(1, 1); - weight_consumed - } else { - log::warn!(target: scheduler::LOG_TARGET, "Para scheduler v2 migration should be removed."); - T::DbWeight::get().reads(1) - } + weight_consumed } #[cfg(feature = "try-runtime")] @@ -341,10 +420,6 @@ pub mod v2 { #[cfg(feature = "try-runtime")] fn post_upgrade(state: Vec) -> Result<(), sp_runtime::DispatchError> { log::trace!(target: crate::scheduler::LOG_TARGET, "Running post_upgrade()"); - ensure!( - StorageVersion::get::>() >= 2, - "Storage version should be at least `2` after the migration" - ); let old_len = u32::from_be_bytes(state.try_into().unwrap()); ensure!( @@ -357,45 +432,18 @@ pub mod v2 { } } -pub fn migrate_assignments() -> Weight { - use assignment_version::ClaimQueue as OldClaimQueue; - - let mut weight: Weight = Weight::zero(); - - let old = OldClaimQueue::::take(); - let old_len = old.len() as u64; - - let new = old - .into_iter() - .map(|(core, v)| { - ( - core, - v.into_iter() - .map(|old| migrate_assignment_paras_entry::(core, old)) - .collect::>(), - ) - }) - .collect::>>>(); - ClaimQueue::::put(new); - - weight = weight.saturating_add(T::DbWeight::get().reads_writes(2 * old_len, 2 * old_len)); - - weight -} - -// Migrate to v2 (remove wrapping `Option`), but still with old assignment format. +// Migrate to v2 (remove wrapping `Option`). 
pub fn migrate_to_v2() -> Weight { let mut weight: Weight = Weight::zero(); let old = v1::ClaimQueue::::take(); - let old_len = old.len() as u64; let new = old .into_iter() .map(|(k, v)| (k, v.into_iter().filter_map(identity).collect::>())) - .collect::>>>>(); + .collect::>>>>(); v2::ClaimQueue::::put(new); - weight = weight.saturating_add(T::DbWeight::get().reads_writes(2 * old_len, 2 * old_len)); + weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 1)); weight } @@ -450,20 +498,4 @@ pub fn migrate_to_v1() -> Weight { weight } -fn migrate_assignment_paras_entry( - core: CoreIndex, - old: assignment_version::OldParasEntryType, -) -> ParasEntryType { - let ParasEntry { assignment, availability_timeouts, ttl } = old; - - ParasEntry { - assignment: - >>::migrate_old_to_current( - assignment, core, - ), - availability_timeouts, - ttl, - } -} - // TODO: Tests! -- GitLab From 728af59dc829a727d9eacdef81d385ed8389ff85 Mon Sep 17 00:00:00 2001 From: eskimor Date: Wed, 15 Nov 2023 12:39:30 +0100 Subject: [PATCH 36/74] Add migration to Rococo. --- polkadot/runtime/rococo/src/lib.rs | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 1f07b3a219c..56545b6592d 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -959,6 +959,8 @@ impl parachains_paras_inherent::Config for Runtime { } impl parachains_scheduler::Config for Runtime { + // If you change this, make sure the `Assignment` type of the new provider is binary compatible, + // otherwise provide a migration. type AssignmentProvider = ParaAssignmentProvider; } @@ -972,6 +974,11 @@ impl parachains_assigner_on_demand::Config for Runtime { type TrafficDefaultValue = OnDemandTrafficDefaultValue; type WeightInfo = weights::runtime_parachains_assigner_on_demand::WeightInfo; } +impl parachains_assigner_bulk::Config for Runtime { + // FIXME: Proper weights: + type WeightInfo = (); + // type WeightInfo = weights::runtime_parachains_assigner_bulk::WeightInfo; +} impl parachains_assigner_parachains::Config for Runtime {} @@ -1475,7 +1482,7 @@ pub mod migrations { assigned_slots::migration::v1::VersionCheckedMigrateToV1, parachains_scheduler::migration::v1::MigrateToV1, parachains_scheduler::migration::v2::MigrateToV2, - parachains_scheduler::migration::assignment_version::MigrateAssignment, + parachains_scheduler::migration::assignment_version::MigrateAssignment>, parachains_configuration::migration::v8::MigrateToV8, parachains_configuration::migration::v9::MigrateToV9, paras_registrar::migration::VersionCheckedMigrateToV1, @@ -1497,6 +1504,21 @@ pub mod migrations { frame_support::migrations::RemovePallet::DbWeight>, frame_support::migrations::RemovePallet::DbWeight>, ); + + /// We are swapping out the assignment type in the scheduler for coretime. + struct SchedulerAssignmentMigration(sp_std::marker::PhantomData); + impl parachains_scheduler::migration::assignment_version::AssignmentMigration + for SchedulerAssignmentMigration + { + const ON_CHAIN_STORAGE_VERSION: u16 = 0; + const STORAGE_VERSION: u16 = 1; + type OldType = parachains_scheduler::common::V0Assignment; + type NewType = parachains_scheduler::assigner::v1::UnifiedAssignmentType; + + fn migrate(core_idx: CoreIndex, old: Self::OldType) -> Self::NewType { + parachains_scheduler::assigner::migrate_assignment_v0_to_v1(old, core_idx) + } + } } /// Executive: handles dispatch to the various modules. 
-- GitLab From cd40661447bc748086f8b1e9634449fb809665cc Mon Sep 17 00:00:00 2001 From: eskimor Date: Wed, 15 Nov 2023 12:39:46 +0100 Subject: [PATCH 37/74] Add remark to Westend. --- polkadot/runtime/westend/src/lib.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 3e92806cfac..911ffef34d8 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1206,6 +1206,8 @@ impl parachains_paras_inherent::Config for Runtime { } impl parachains_scheduler::Config for Runtime { + // If you change this, make sure the `Assignment` type of the new provider is binary compatible, + // otherwise provide a migration. type AssignmentProvider = ParaAssignmentProvider; } -- GitLab From cfc860a58ee0cb032ce6fc4b8e54879b04927447 Mon Sep 17 00:00:00 2001 From: eskimor Date: Wed, 15 Nov 2023 13:36:03 +0100 Subject: [PATCH 38/74] Fix Rococo. --- polkadot/runtime/parachains/src/assigner/v1.rs | 12 +++++++----- .../parachains/src/scheduler/migration.rs | 2 +- polkadot/runtime/rococo/src/lib.rs | 18 +++++++++++------- 3 files changed, 19 insertions(+), 13 deletions(-) diff --git a/polkadot/runtime/parachains/src/assigner/v1.rs b/polkadot/runtime/parachains/src/assigner/v1.rs index 0c971edbcff..b6a74451052 100644 --- a/polkadot/runtime/parachains/src/assigner/v1.rs +++ b/polkadot/runtime/parachains/src/assigner/v1.rs @@ -91,7 +91,8 @@ impl AssignmentProvider> for Pallet { /// Pops an `Assignment` from a specified `CoreIndex` fn pop_assignment_for_core(core_idx: CoreIndex) -> Option { let legacy_cores = as FixedAssignmentProvider< - BlockNumberFor>>::session_core_count(); + BlockNumberFor, + >>::session_core_count(); if core_idx.0 < legacy_cores { as AssignmentProvider>>::pop_assignment_for_core( @@ -135,7 +136,8 @@ impl AssignmentProvider> for Pallet { fn get_provider_config(core_idx: CoreIndex) -> AssignmentProviderConfig> { let legacy_cores = as FixedAssignmentProvider< - BlockNumberFor>>::session_core_count(); + BlockNumberFor, + >>::session_core_count(); if core_idx.0 < legacy_cores { as AssignmentProvider>>::get_provider_config( @@ -177,9 +179,9 @@ pub fn migrate_assignment_v0_to_v1( old, )) } else { - // We are not subtracting `legacy_cores` from `core` here, as this was not done before for on-demand. - // Therefore we keep it as is, so the book keeping will affect the correct core in the underlying on-demand - // assignment provider. + // We are not subtracting `legacy_cores` from `core` here, as this was not done before for + // on-demand. Therefore we keep it as is, so the book keeping will affect the correct core + // in the underlying on-demand assignment provider. UnifiedAssignment::Bulk(assigner_bulk::BulkAssignment::from_v0_assignment(old, core)) } } diff --git a/polkadot/runtime/parachains/src/scheduler/migration.rs b/polkadot/runtime/parachains/src/scheduler/migration.rs index 0e3b5c6daae..be1025bec79 100644 --- a/polkadot/runtime/parachains/src/scheduler/migration.rs +++ b/polkadot/runtime/parachains/src/scheduler/migration.rs @@ -65,7 +65,7 @@ pub mod assignment_version { mod new { use super::*; - /// Previously used `ParasEntryType`. + /// Now used `ParasEntryType`. 
pub type ParasEntryType = ParasEntry, AssignmentType>; /// Now used assignment type: diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 56545b6592d..62bd05cd85b 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -41,7 +41,8 @@ use scale_info::TypeInfo; use sp_std::{cmp::Ordering, collections::btree_map::BTreeMap, prelude::*}; use runtime_parachains::{ - assigner as parachains_assigner, assigner_on_demand as parachains_assigner_on_demand, + assigner::v1 as parachains_assigner_v1, + assigner_bulk as parachains_assigner_bulk, assigner_on_demand as parachains_assigner_on_demand, assigner_parachains as parachains_assigner_parachains, configuration as parachains_configuration, disputes as parachains_disputes, disputes::slashing as parachains_slashing, @@ -982,7 +983,7 @@ impl parachains_assigner_bulk::Config for Runtime { impl parachains_assigner_parachains::Config for Runtime {} -impl parachains_assigner::Config for Runtime {} +impl parachains_assigner_v1::Config for Runtime {} impl parachains_initializer::Config for Runtime { type Randomness = pallet_babe::RandomnessFromOneEpochAgo; @@ -1361,7 +1362,7 @@ construct_runtime! { ParasDisputes: parachains_disputes::{Pallet, Call, Storage, Event} = 62, ParasSlashing: parachains_slashing::{Pallet, Call, Storage, ValidateUnsigned} = 63, MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 64, - ParaAssignmentProvider: parachains_assigner::{Pallet, Storage} = 65, + ParaAssignmentProvider: parachains_assigner_v1::{Pallet, Storage} = 65, OnDemandAssignmentProvider: parachains_assigner_on_demand::{Pallet, Call, Storage, Event} = 66, ParachainsAssignmentProvider: parachains_assigner_parachains::{Pallet} = 67, @@ -1430,6 +1431,8 @@ pub mod migrations { use frame_support::traits::LockIdentifier; use frame_system::pallet_prelude::BlockNumberFor; + use parachains_scheduler::common::AssignmentVersion; + use primitives::CoreIndex; parameter_types! { pub const DemocracyPalletName: &'static str = "Democracy"; @@ -1510,13 +1513,14 @@ pub mod migrations { impl parachains_scheduler::migration::assignment_version::AssignmentMigration for SchedulerAssignmentMigration { - const ON_CHAIN_STORAGE_VERSION: u16 = 0; - const STORAGE_VERSION: u16 = 1; + const ON_CHAIN_STORAGE_VERSION: AssignmentVersion = AssignmentVersion::new(0); + const STORAGE_VERSION: AssignmentVersion = AssignmentVersion::new(1); + type OldType = parachains_scheduler::common::V0Assignment; - type NewType = parachains_scheduler::assigner::v1::UnifiedAssignmentType; + type NewType = parachains_assigner_v1::UnifiedAssignmentType; fn migrate(core_idx: CoreIndex, old: Self::OldType) -> Self::NewType { - parachains_scheduler::assigner::migrate_assignment_v0_to_v1(old, core_idx) + parachains_assigner_v1::migrate_assignment_v0_to_v1::(old, core_idx) } } } -- GitLab From 5b0622bc4d0cf5f0f57904874ea90eb08de79c52 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Wed, 15 Nov 2023 14:28:32 +0100 Subject: [PATCH 39/74] [CI] Prepare CI for Merge Queues (#2308) PR prepares CI to the GitHub Merge Queues. All github actions that were running in PR adjusted so they can run in the merge queues. Zombienet jobs will do nothing during PRs but they will run during merge queues. 
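The change is mechanical: GitHub workflows additionally trigger on `merge_group` and PR-only jobs bail out early when running from a merge queue, while GitLab jobs that should only run in merge queues guard on the `gh-readonly-queue` ref prefix. Roughly, taken from the `check-labels.yml` change below (other jobs differ only in wording; the GitLab guard is shown as a comment):

```yaml
on:
  pull_request:
    types: [labeled, opened, synchronize, unlabeled]
  merge_group:

jobs:
  check-labels:
    runs-on: ubuntu-latest
    steps:
      - name: Skip merge queue
        if: ${{ contains(github.ref, 'gh-readonly-queue') }}
        run: exit 0
      # GitLab jobs use the inverse check in before_script/script:
      #   if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then
      #     echo "I will run only in a merge queue"; exit 0; fi
```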
Jobs that will be skipped during PR: - all zombienet jobs - all publish docker jobs Jobs that will be skipped during merge queue: - check-labels - check-prdoc - pr-custom-review - review trigger cc https://github.com/paritytech/ci_cd/issues/862 --- .github/workflows/check-labels.yml | 4 ++++ .github/workflows/check-licenses.yml | 1 + .github/workflows/check-links.yml | 1 + .github/workflows/check-markdown.yml | 5 +++-- .github/workflows/check-prdoc.yml | 4 ++++ .github/workflows/check-publish.yml | 1 + .github/workflows/fmt-check.yml | 1 + .github/workflows/gitspiegel-trigger.yml | 1 + .github/workflows/pr-custom-review.yml | 4 ++++ .github/workflows/review-trigger.yml | 6 +++++- .gitlab-ci.yml | 8 +++++++- .gitlab/pipeline/publish.yml | 4 ++-- .gitlab/pipeline/zombienet/cumulus.yml | 2 ++ .gitlab/pipeline/zombienet/polkadot.yml | 18 +++++++++++++----- .gitlab/pipeline/zombienet/substrate.yml | 2 ++ 15 files changed, 51 insertions(+), 11 deletions(-) diff --git a/.github/workflows/check-labels.yml b/.github/workflows/check-labels.yml index 83b52e82313..97562f0da09 100644 --- a/.github/workflows/check-labels.yml +++ b/.github/workflows/check-labels.yml @@ -3,11 +3,15 @@ name: Check labels on: pull_request: types: [labeled, opened, synchronize, unlabeled] + merge_group: jobs: check-labels: runs-on: ubuntu-latest steps: + - name: Skip merge queue + if: ${{ contains(github.ref, 'gh-readonly-queue') }} + run: exit 0 - name: Pull image env: IMAGE: paritytech/ruled_labels:0.4.0 diff --git a/.github/workflows/check-licenses.yml b/.github/workflows/check-licenses.yml index 50dd10a6d3c..a66e3a53998 100644 --- a/.github/workflows/check-licenses.yml +++ b/.github/workflows/check-licenses.yml @@ -2,6 +2,7 @@ name: Check licenses on: pull_request: + merge_group: permissions: packages: read diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml index 3ed6ba84b82..0932d38c9ad 100644 --- a/.github/workflows/check-links.yml +++ b/.github/workflows/check-links.yml @@ -8,6 +8,7 @@ on: - ".github/workflows/check-links.yml" - ".config/lychee.toml" types: [opened, synchronize, reopened, ready_for_review] + merge_group: permissions: packages: read diff --git a/.github/workflows/check-markdown.yml b/.github/workflows/check-markdown.yml index 05b5d898d67..2108f942090 100644 --- a/.github/workflows/check-markdown.yml +++ b/.github/workflows/check-markdown.yml @@ -3,6 +3,7 @@ name: Check Markdown on: pull_request: types: [opened, synchronize, reopened, ready_for_review] + merge_group: permissions: packages: read @@ -23,8 +24,8 @@ jobs: - name: Install tooling run: | - npm install -g markdownlint-cli - markdownlint --version + npm install -g markdownlint-cli + markdownlint --version - name: Check Markdown env: diff --git a/.github/workflows/check-prdoc.yml b/.github/workflows/check-prdoc.yml index 690f7a3f133..54e4d2b9680 100644 --- a/.github/workflows/check-prdoc.yml +++ b/.github/workflows/check-prdoc.yml @@ -3,6 +3,7 @@ name: Check PRdoc on: pull_request: types: [labeled, opened, synchronize, unlabeled] + merge_group: env: IMAGE: paritytech/prdoc:v0.0.5 @@ -17,6 +18,9 @@ jobs: check-prdoc: runs-on: ubuntu-latest steps: + - name: Skip merge queue + if: ${{ contains(github.ref, 'gh-readonly-queue') }} + run: exit 0 - name: Pull image run: | echo "Pulling $IMAGE" diff --git a/.github/workflows/check-publish.yml b/.github/workflows/check-publish.yml index 9ab47dba51b..c0d2b889381 100644 --- a/.github/workflows/check-publish.yml +++ b/.github/workflows/check-publish.yml @@ -6,6 +6,7 @@ on: - 
master pull_request: types: [opened, synchronize, reopened, ready_for_review] + merge_group: jobs: check-publish: diff --git a/.github/workflows/fmt-check.yml b/.github/workflows/fmt-check.yml index 7ca4413bb05..e4d39acabfd 100644 --- a/.github/workflows/fmt-check.yml +++ b/.github/workflows/fmt-check.yml @@ -6,6 +6,7 @@ on: - master pull_request: types: [opened, synchronize, reopened, ready_for_review] + merge_group: jobs: quick_check: diff --git a/.github/workflows/gitspiegel-trigger.yml b/.github/workflows/gitspiegel-trigger.yml index dce3aaf2fec..59347fad6d6 100644 --- a/.github/workflows/gitspiegel-trigger.yml +++ b/.github/workflows/gitspiegel-trigger.yml @@ -13,6 +13,7 @@ on: - unlocked - ready_for_review - reopened + merge_group: jobs: sync: diff --git a/.github/workflows/pr-custom-review.yml b/.github/workflows/pr-custom-review.yml index b15d20c696f..4e0809cbfdc 100644 --- a/.github/workflows/pr-custom-review.yml +++ b/.github/workflows/pr-custom-review.yml @@ -14,11 +14,15 @@ on: - ready_for_review - converted_to_draft pull_request_review: + merge_group: jobs: pr-custom-review: runs-on: ubuntu-latest steps: + - name: Skip merge queue + if: ${{ contains(github.ref, 'gh-readonly-queue') }} + run: exit 0 - name: Skip if pull request is in Draft # `if: github.event.pull_request.draft == true` should be kept here, at # the step level, rather than at the job level. The latter is not diff --git a/.github/workflows/review-trigger.yml b/.github/workflows/review-trigger.yml index 2810ea356e6..e5fcb434fd3 100644 --- a/.github/workflows/review-trigger.yml +++ b/.github/workflows/review-trigger.yml @@ -1,6 +1,6 @@ name: Review-Trigger -on: +on: pull_request_target: types: - opened @@ -10,6 +10,7 @@ on: - review_request_removed - ready_for_review pull_request_review: + merge_group: jobs: trigger-review-bot: @@ -18,6 +19,9 @@ jobs: runs-on: ubuntu-latest name: trigger review bot steps: + - name: Skip merge queue + if: ${{ contains(github.ref, 'gh-readonly-queue') }} + run: exit 0 - name: Get PR number env: PR_NUMBER: ${{ github.event.pull_request.number }} diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index f507afda23e..1dc483004f2 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -31,7 +31,7 @@ variables: NEXTEST_FAILURE_OUTPUT: immediate-final NEXTEST_SUCCESS_OUTPUT: final ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.79" - DOCKER_IMAGES_VERSION: "${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}" + DOCKER_IMAGES_VERSION: "${CI_COMMIT_SHA}" default: retry: @@ -136,11 +136,13 @@ default: - if: $CI_PIPELINE_SOURCE == "schedule" - if: $CI_COMMIT_REF_NAME == "master" - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 .test-pr-refs: rules: - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues # handle the specific case where benches could store incorrect bench data because of the downstream staging runs # exclude cargo-check-benches from such runs @@ -152,6 +154,7 @@ default: - if: $CI_PIPELINE_SOURCE == "schedule" - if: $CI_COMMIT_REF_NAME == "master" - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. 
v1.0, v2.1rc1 .test-refs-no-trigger: @@ -162,6 +165,7 @@ default: - if: $CI_PIPELINE_SOURCE == "schedule" - if: $CI_COMMIT_REF_NAME == "master" - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 - if: $CI_COMMIT_REF_NAME =~ /^ci-release-.*$/ @@ -172,6 +176,7 @@ default: - if: $CI_PIPELINE_SOURCE == "web" - if: $CI_PIPELINE_SOURCE == "schedule" - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues .publish-refs: rules: @@ -192,6 +197,7 @@ default: - if: $CI_COMMIT_REF_NAME == "master" - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues .zombienet-refs: extends: .build-refs diff --git a/.gitlab/pipeline/publish.yml b/.gitlab/pipeline/publish.yml index a03d407c040..f2308c334e0 100644 --- a/.gitlab/pipeline/publish.yml +++ b/.gitlab/pipeline/publish.yml @@ -71,8 +71,8 @@ publish-rustdoc: DOCKERFILE: "" # docker/path-to.Dockerfile IMAGE_NAME: "" # docker.io/paritypr/image_name script: - # - test "$PARITYPR_USER" -a "$PARITYPR_PASS" || - # ( echo "no docker credentials provided"; exit 1 ) + # Exit if the job is not running in a merge queue + - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - $BUILDAH_COMMAND build --format=docker --build-arg VCS_REF="${CI_COMMIT_SHA}" diff --git a/.gitlab/pipeline/zombienet/cumulus.yml b/.gitlab/pipeline/zombienet/cumulus.yml index 3f2c6f64fbf..c8a1df004e3 100644 --- a/.gitlab/pipeline/zombienet/cumulus.yml +++ b/.gitlab/pipeline/zombienet/cumulus.yml @@ -3,6 +3,8 @@ .zombienet-before-script: before_script: + # Exit if the job is not merge queue + - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - echo "Zombie-net Tests Config" - echo "${ZOMBIENET_IMAGE}" - echo "${POLKADOT_IMAGE}" diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml index 8fc8b280bba..cc960557298 100644 --- a/.gitlab/pipeline/zombienet/polkadot.yml +++ b/.gitlab/pipeline/zombienet/polkadot.yml @@ -4,6 +4,8 @@ # common settings for all zombienet jobs .zombienet-polkadot-common: before_script: + # Exit if the job is not merge queue + - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - export BUILD_RELEASE_VERSION="$(cat ./artifacts/BUILD_RELEASE_VERSION)" # from build-linux-stable job - export DEBUG=zombie,zombie::network-node - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG} @@ -12,12 +14,12 @@ - export MALUS_IMAGE="${MALUS_IMAGE}":${PIPELINE_IMAGE_TAG} - IMAGE_AVAILABLE=$(curl -o /dev/null -w "%{http_code}" -I -L -s https://registry.hub.docker.com/v2/repositories/parity/polkadot/tags/${BUILD_RELEASE_VERSION}) - if [ $IMAGE_AVAILABLE -eq 200 ]; then - export ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE="docker.io/parity/polkadot:${BUILD_RELEASE_VERSION}"; + export ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE="docker.io/parity/polkadot:${BUILD_RELEASE_VERSION}"; else - echo "Getting the image to use as SECONDARY, using ${BUILD_RELEASE_VERSION} as base"; - VERSIONS=$(curl -L -s 'https://registry.hub.docker.com/v2/repositories/parity/polkadot/tags/' | jq -r '.results[].name'| grep -E "v[0-9]" |grep -vE 
"[0-9]-"); - VERSION_TO_USE=$(echo "${BUILD_RELEASE_VERSION}\n$VERSIONS"|sort -r|grep -A1 "${BUILD_RELEASE_VERSION}"|tail -1); - export ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE="docker.io/parity/polkadot:${VERSION_TO_USE}"; + echo "Getting the image to use as SECONDARY, using ${BUILD_RELEASE_VERSION} as base"; + VERSIONS=$(curl -L -s 'https://registry.hub.docker.com/v2/repositories/parity/polkadot/tags/' | jq -r '.results[].name'| grep -E "v[0-9]" |grep -vE "[0-9]-"); + VERSION_TO_USE=$(echo "${BUILD_RELEASE_VERSION}\n$VERSIONS"|sort -r|grep -A1 "${BUILD_RELEASE_VERSION}"|tail -1); + export ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE="docker.io/parity/polkadot:${VERSION_TO_USE}"; fi - echo "Zombienet Tests Config" - echo "gh-dir ${GH_DIR}" @@ -117,6 +119,8 @@ zombienet-polkadot-smoke-0001-parachains-smoke-test: extends: - .zombienet-polkadot-common before_script: + # Exit if the job is not merge queue + - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG} - export COL_IMAGE="${COLANDER_IMAGE}":${PIPELINE_IMAGE_TAG} - echo "Zombienet Tests Config" @@ -134,6 +138,8 @@ zombienet-polkadot-smoke-0002-parachains-parachains-upgrade-smoke: extends: - .zombienet-polkadot-common before_script: + # Exit if the job is not merge queue + - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG} - export CUMULUS_IMAGE="docker.io/paritypr/polkadot-parachain-debug:${DOCKER_IMAGES_VERSION}" - echo "Zombienet Tests Config" @@ -176,6 +182,8 @@ zombienet-polkadot-misc-0002-upgrade-node: - job: build-linux-stable artifacts: true before_script: + # Exit if the job is not merge queue + - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - export ZOMBIENET_INTEGRATION_TEST_IMAGE="docker.io/parity/polkadot:latest" - echo "Overrided poladot image ${ZOMBIENET_INTEGRATION_TEST_IMAGE}" - export COL_IMAGE="${COLANDER_IMAGE}":${PIPELINE_IMAGE_TAG} diff --git a/.gitlab/pipeline/zombienet/substrate.yml b/.gitlab/pipeline/zombienet/substrate.yml index 9e14ebe0852..e627575a31a 100644 --- a/.gitlab/pipeline/zombienet/substrate.yml +++ b/.gitlab/pipeline/zombienet/substrate.yml @@ -4,6 +4,8 @@ # common settings for all zombienet jobs .zombienet-substrate-common: before_script: + # Exit if the job is not merge queue + - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - echo "Zombienet Tests Config" - echo "${ZOMBIENET_IMAGE}" - echo "${GH_DIR}" -- GitLab From c79b234b3bb9d02ee5145c8d65a497104b5d3ea7 Mon Sep 17 00:00:00 2001 From: joe petrowski <25483142+joepetrowski@users.noreply.github.com> Date: Wed, 15 Nov 2023 15:22:28 +0100 Subject: [PATCH 40/74] Identity Deposits Relay to Parachain Migration (#1814) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The goal of this PR is to migrate Identity deposits from the Relay Chain to a system parachain. The problem I want to solve is that `IdentityOf` and `SubsOf` both store an amount that's held in reserve as a storage deposit. 
When migrating to a parachain, we can take a snapshot of the actual `IdentityInfo` and sub-account mappings, but should migrate (off chain) the `deposit`s to zero, since the chain (and by extension, accounts) won't have any funds at genesis. The good news is that we expect parachain deposits to be significantly lower (possibly 100x) on the parachain. That is, a deposit of 21 DOT on the Relay Chain would need 0.21 DOT on a parachain. This PR proposes to migrate the deposits in the following way: 1. Introduces a new pallet with two extrinsics: - `reap_identity`: Has a configurable `ReapOrigin`, which would be set to `EnsureSigned` on the Relay Chain (i.e. callable by anyone) and `EnsureRoot` on the parachain (we don't want identities reaped from there). - `poke_deposit`: Checks what deposit the pallet holds (at genesis, zero) and attempts to update the amount based on the calculated deposit for storage data. 2. `reap_identity` clears all storage data for a `target` account and unreserves their deposit. 3. A `ReapIdentityHandler` teleports the necessary DOT to the parachain and calls `poke_deposit`. Since the parachain deposit is much lower, and was just unreserved, we know we have enough. One awkwardness I ran into was that the XCMv3 instruction set does not provide a way for the system to teleport assets without a fee being deducted on reception. Users shouldn't have to pay a fee for the system to migrate their info to a more efficient location. So I wrote my own program and did the `InitiateTeleport` accounting on my own to send a program with `UnpaidExecution`. Have discussed an `InitiateUnpaidTeleport` instruction with @franciscoaguirre . Obviously any chain executing this would have to pass a `Barrier` for free execution. TODO: - [x] Confirm People Chain ParaId - [x] Confirm People Chain deposit rates (determined in https://github.com/paritytech/polkadot-sdk/pull/2281) - [x] Add pallet to Westend --------- Co-authored-by: Bastian Köcher --- Cargo.lock | 1 + polkadot/runtime/common/Cargo.toml | 4 + .../runtime/common/src/identity_migrator.rs | 305 ++++++++++++++++++ .../runtime/common/src/integration_tests.rs | 29 +- polkadot/runtime/common/src/lib.rs | 1 + polkadot/runtime/rococo/src/impls.rs | 158 +++++++++ polkadot/runtime/rococo/src/lib.rs | 39 ++- polkadot/runtime/rococo/src/weights/mod.rs | 1 + .../runtime_common_identity_migrator.rs | 97 ++++++ polkadot/runtime/westend/src/impls.rs | 158 +++++++++ polkadot/runtime/westend/src/lib.rs | 33 +- polkadot/runtime/westend/src/weights/mod.rs | 1 + .../runtime_common_identity_migrator.rs | 97 ++++++ substrate/frame/identity/src/lib.rs | 133 +++++++- substrate/frame/identity/src/tests.rs | 67 ++++ 15 files changed, 1111 insertions(+), 13 deletions(-) create mode 100644 polkadot/runtime/common/src/identity_migrator.rs create mode 100644 polkadot/runtime/rococo/src/impls.rs create mode 100644 polkadot/runtime/rococo/src/weights/runtime_common_identity_migrator.rs create mode 100644 polkadot/runtime/westend/src/impls.rs create mode 100644 polkadot/runtime/westend/src/weights/runtime_common_identity_migrator.rs diff --git a/Cargo.lock b/Cargo.lock index 9ae1bb905b2..b1a46a88f38 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13047,6 +13047,7 @@ dependencies = [ "pallet-balances", "pallet-election-provider-multi-phase", "pallet-fast-unstake", + "pallet-identity", "pallet-session", "pallet-staking", "pallet-staking-reward-fn", diff --git a/polkadot/runtime/common/Cargo.toml b/polkadot/runtime/common/Cargo.toml index 0882e555aaf..4391b6d81eb 100644 
--- a/polkadot/runtime/common/Cargo.toml +++ b/polkadot/runtime/common/Cargo.toml @@ -30,6 +30,7 @@ sp-npos-elections = { path = "../../../substrate/primitives/npos-elections", def pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } pallet-fast-unstake = { path = "../../../substrate/frame/fast-unstake", default-features = false } +pallet-identity = { path = "../../../substrate/frame/identity", default-features = false } pallet-session = { path = "../../../substrate/frame/session", default-features = false } frame-support = { path = "../../../substrate/frame/support", default-features = false } pallet-staking = { path = "../../../substrate/frame/staking", default-features = false } @@ -85,6 +86,7 @@ std = [ "pallet-balances/std", "pallet-election-provider-multi-phase/std", "pallet-fast-unstake/std", + "pallet-identity/std", "pallet-session/std", "pallet-staking-reward-fn/std", "pallet-staking/std", @@ -124,6 +126,7 @@ runtime-benchmarks = [ "pallet-balances/runtime-benchmarks", "pallet-election-provider-multi-phase/runtime-benchmarks", "pallet-fast-unstake/runtime-benchmarks", + "pallet-identity/runtime-benchmarks", "pallet-staking/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", @@ -147,6 +150,7 @@ try-runtime = [ "pallet-balances/try-runtime", "pallet-election-provider-multi-phase/try-runtime", "pallet-fast-unstake/try-runtime", + "pallet-identity/try-runtime", "pallet-session/try-runtime", "pallet-staking/try-runtime", "pallet-timestamp/try-runtime", diff --git a/polkadot/runtime/common/src/identity_migrator.rs b/polkadot/runtime/common/src/identity_migrator.rs new file mode 100644 index 00000000000..cc2c3ce7773 --- /dev/null +++ b/polkadot/runtime/common/src/identity_migrator.rs @@ -0,0 +1,305 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! This pallet is designed to go into a source chain and destination chain to migrate data. The +//! design motivations are: +//! +//! - Call some function on the source chain that executes some migration (clearing state, +//! forwarding an XCM program). +//! - Call some function (probably from an XCM program) on the destination chain. +//! - Avoid cluttering the source pallet with new dispatchables that are unrelated to its +//! functionality and only used for migration. +//! +//! After the migration is complete, the pallet may be removed from both chains' runtimes as well as +//! the `polkadot-runtime-common` crate. 
+ +use frame_support::{dispatch::DispatchResult, traits::Currency, weights::Weight}; +pub use pallet::*; +use pallet_identity; +use sp_core::Get; + +#[cfg(feature = "runtime-benchmarks")] +use frame_benchmarking::{account, impl_benchmark_test_suite, v2::*, BenchmarkError}; + +pub trait WeightInfo { + fn reap_identity(r: u32, s: u32) -> Weight; + fn poke_deposit() -> Weight; +} + +impl WeightInfo for () { + fn reap_identity(_r: u32, _s: u32) -> Weight { + Weight::MAX + } + fn poke_deposit() -> Weight { + Weight::MAX + } +} + +pub struct TestWeightInfo; +impl WeightInfo for TestWeightInfo { + fn reap_identity(_r: u32, _s: u32) -> Weight { + Weight::zero() + } + fn poke_deposit() -> Weight { + Weight::zero() + } +} + +// Must use the same `Balance` as `T`'s Identity pallet to handle deposits. +type BalanceOf = <::Currency as Currency< + ::AccountId, +>>::Balance; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::{ + dispatch::{DispatchResultWithPostInfo, PostDispatchInfo}, + pallet_prelude::*, + traits::EnsureOrigin, + }; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config + pallet_identity::Config { + /// Overarching event type. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + /// The origin that can reap identities. Expected to be `EnsureSigned` on the + /// source chain such that anyone can all this function. + type Reaper: EnsureOrigin; + + /// A handler for what to do when an identity is reaped. + type ReapIdentityHandler: OnReapIdentity; + + /// Weight information for the extrinsics in the pallet. + type WeightInfo: WeightInfo; + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// The identity and all sub accounts were reaped for `who`. + IdentityReaped { who: T::AccountId }, + /// The deposits held for `who` were updated. `identity` is the new deposit held for + /// identity info, and `subs` is the new deposit held for the sub-accounts. + DepositUpdated { who: T::AccountId, identity: BalanceOf, subs: BalanceOf }, + } + + #[pallet::call] + impl Pallet { + /// Reap the `IdentityInfo` of `who` from the Identity pallet of `T`, unreserving any + /// deposits held and removing storage items associated with `who`. + #[pallet::call_index(0)] + #[pallet::weight(::WeightInfo::reap_identity( + T::MaxRegistrars::get(), + T::MaxSubAccounts::get() + ))] + pub fn reap_identity( + origin: OriginFor, + who: T::AccountId, + ) -> DispatchResultWithPostInfo { + T::Reaper::ensure_origin(origin)?; + // - number of registrars (required to calculate weight) + // - byte size of `IdentityInfo` (required to calculate remote deposit) + // - number of sub accounts (required to calculate both weight and remote deposit) + let (registrars, bytes, subs) = pallet_identity::Pallet::::reap_identity(&who)?; + T::ReapIdentityHandler::on_reap_identity(&who, bytes, subs)?; + Self::deposit_event(Event::IdentityReaped { who }); + let post = PostDispatchInfo { + actual_weight: Some(::WeightInfo::reap_identity( + registrars, subs, + )), + pays_fee: Pays::No, + }; + Ok(post) + } + + /// Update the deposit of `who`. Meant to be called by the system with an XCM `Transact` + /// Instruction. 
+ #[pallet::call_index(1)] + #[pallet::weight(::WeightInfo::poke_deposit())] + pub fn poke_deposit(origin: OriginFor, who: T::AccountId) -> DispatchResultWithPostInfo { + ensure_root(origin)?; + let (id_deposit, subs_deposit) = pallet_identity::Pallet::::poke_deposit(&who)?; + Self::deposit_event(Event::DepositUpdated { + who, + identity: id_deposit, + subs: subs_deposit, + }); + Ok(Pays::No.into()) + } + } +} + +/// Trait to handle reaping identity from state. +pub trait OnReapIdentity { + /// What to do when an identity is reaped. For example, the implementation could send an XCM + /// program to another chain. Concretely, a type implementing this trait in the Polkadot + /// runtime would teleport enough DOT to the People Chain to cover the Identity deposit there. + /// + /// This could also directly include `Transact { poke_deposit(..), ..}`. + /// + /// Inputs + /// - `who`: Whose identity was reaped. + /// - `bytes`: The byte size of `IdentityInfo`. + /// - `subs`: The number of sub-accounts they had. + fn on_reap_identity(who: &AccountId, bytes: u32, subs: u32) -> DispatchResult; +} + +impl OnReapIdentity for () { + fn on_reap_identity(_who: &AccountId, _bytes: u32, _subs: u32) -> DispatchResult { + Ok(()) + } +} + +#[cfg(feature = "runtime-benchmarks")] +#[benchmarks] +mod benchmarks { + use super::*; + use frame_support::traits::EnsureOrigin; + use frame_system::RawOrigin; + use pallet_identity::{Data, IdentityInformationProvider, Judgement, Pallet as Identity}; + use parity_scale_codec::Encode; + use sp_runtime::{ + traits::{Bounded, Hash, StaticLookup}, + Saturating, + }; + use sp_std::{boxed::Box, vec::Vec, *}; + + const SEED: u32 = 0; + + fn assert_last_event(generic_event: ::RuntimeEvent) { + let events = frame_system::Pallet::::events(); + let system_event: ::RuntimeEvent = generic_event.into(); + let frame_system::EventRecord { event, .. 
} = &events[events.len() - 1]; + assert_eq!(event, &system_event); + } + + #[benchmark] + fn reap_identity( + r: Linear<0, { T::MaxRegistrars::get() }>, + s: Linear<0, { T::MaxSubAccounts::get() }>, + ) -> Result<(), BenchmarkError> { + // set up target + let target: T::AccountId = account("target", 0, SEED); + let target_origin = + ::RuntimeOrigin::from(RawOrigin::Signed(target.clone())); + let target_lookup = T::Lookup::unlookup(target.clone()); + let _ = T::Currency::make_free_balance_be(&target, BalanceOf::::max_value()); + + // set identity + let info = ::IdentityInformation::create_identity_info(); + Identity::::set_identity( + RawOrigin::Signed(target.clone()).into(), + Box::new(info.clone()), + )?; + + // create and set subs + let mut subs = Vec::new(); + let data = Data::Raw(vec![0; 32].try_into().unwrap()); + for ii in 0..s { + let sub_account = account("sub", ii, SEED); + subs.push((sub_account, data.clone())); + } + Identity::::set_subs(target_origin.clone(), subs.clone())?; + + // add registrars and provide judgements + let registrar_origin = T::RegistrarOrigin::try_successful_origin() + .expect("RegistrarOrigin has no successful origin required for the benchmark"); + for ii in 0..r { + // registrar account + let registrar: T::AccountId = account("registrar", ii, SEED); + let registrar_lookup = T::Lookup::unlookup(registrar.clone()); + let _ = ::Currency::make_free_balance_be( + ®istrar, + ::Currency::minimum_balance(), + ); + + // add registrar + Identity::::add_registrar(registrar_origin.clone(), registrar_lookup)?; + Identity::::set_fee(RawOrigin::Signed(registrar.clone()).into(), ii, 10u32.into())?; + let fields = ::IdentityInformation::all_fields(); + Identity::::set_fields(RawOrigin::Signed(registrar.clone()).into(), ii, fields)?; + + // request and provide judgement + Identity::::request_judgement(target_origin.clone(), ii, 10u32.into())?; + Identity::::provide_judgement( + RawOrigin::Signed(registrar).into(), + ii, + target_lookup.clone(), + Judgement::Reasonable, + ::Hashing::hash_of(&info), + )?; + } + + let origin = T::Reaper::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + + #[extrinsic_call] + _(origin as T::RuntimeOrigin, target.clone()); + + assert_last_event::(Event::::IdentityReaped { who: target.clone() }.into()); + + let fields = ::IdentityInformation::all_fields(); + assert!(!Identity::::has_identity(&target, fields)); + assert_eq!(Identity::::subs(&target).len(), 0); + + Ok(()) + } + + #[benchmark] + fn poke_deposit() -> Result<(), BenchmarkError> { + let target: T::AccountId = account("target", 0, SEED); + let _ = T::Currency::make_free_balance_be(&target, BalanceOf::::max_value()); + let info = ::IdentityInformation::create_identity_info(); + + let _ = Identity::::set_identity_no_deposit(&target, info.clone()); + + let sub_account: T::AccountId = account("sub", 0, SEED); + let _ = Identity::::set_sub_no_deposit(&target, sub_account.clone()); + + // expected deposits + let expected_id_deposit = ::BasicDeposit::get() + .saturating_add( + ::ByteDeposit::get() + .saturating_mul(>::from(info.encoded_size() as u32)), + ); + // only 1 sub + let expected_sub_deposit = ::SubAccountDeposit::get(); + + #[extrinsic_call] + _(RawOrigin::Root, target.clone()); + + assert_last_event::( + Event::::DepositUpdated { + who: target, + identity: expected_id_deposit, + subs: expected_sub_deposit, + } + .into(), + ); + + Ok(()) + } + + impl_benchmark_test_suite!( + Pallet, + crate::integration_tests::new_test_ext(), + crate::integration_tests::Test, + ); +} 
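
The destination (People Chain) side of this pallet is not included in this patch. For orientation only, here is a minimal sketch of how a destination runtime might wire it up, assuming the usual `Runtime`, `RuntimeEvent` and `AccountId` aliases and that the pallet is imported from `polkadot-runtime-common`: `EnsureRoot` as the `Reaper` (identities should not be reaped on the parachain, as noted above), the no-op `()` handler, and a placeholder `()` `WeightInfo`. The one hard requirement is that the `construct_runtime!` index matches the codec index (248) the Relay Chain uses when encoding the remote `poke_deposit` call:

```rust
// Illustrative destination-chain (People Chain) configuration; not part of this patch.
impl identity_migrator::Config for Runtime {
    type RuntimeEvent = RuntimeEvent;
    // Only Root may reap identities on the destination chain; users must not trigger it here.
    type Reaper = frame_system::EnsureRoot<AccountId>;
    // Nothing further to do once `poke_deposit` has re-reserved the local deposit.
    type ReapIdentityHandler = ();
    // Placeholder weights for the sketch; a real runtime would use benchmarked weights.
    type WeightInfo = ();
}

// In `construct_runtime!`, the pallet index must match the codec index the Relay Chain
// uses for its remote call encoding (248 in this patch), e.g.:
//     IdentityMigrator: identity_migrator::{Pallet, Call, Event<T>} = 248,
```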
diff --git a/polkadot/runtime/common/src/integration_tests.rs b/polkadot/runtime/common/src/integration_tests.rs index d5a32775fd4..793f75e79cd 100644 --- a/polkadot/runtime/common/src/integration_tests.rs +++ b/polkadot/runtime/common/src/integration_tests.rs @@ -17,7 +17,7 @@ //! Mocking utilities for testing with real pallets. use crate::{ - auctions, crowdloan, + auctions, crowdloan, identity_migrator, mock::{conclude_pvf_checking, validators_public_keys}, paras_registrar, slot_range::SlotRange, @@ -32,6 +32,7 @@ use frame_support::{ }; use frame_support_test::TestRandomness; use frame_system::EnsureRoot; +use pallet_identity::{self, legacy::IdentityInfo}; use parity_scale_codec::Encode; use primitives::{ BlockNumber, HeadData, Id as ParaId, SessionIndex, ValidationCode, LOWEST_PUBLIC_ID, @@ -88,6 +89,10 @@ frame_support::construct_runtime!( Auctions: auctions::{Pallet, Call, Storage, Event}, Crowdloan: crowdloan::{Pallet, Call, Storage, Event}, Slots: slots::{Pallet, Call, Storage, Event}, + + // Migrators + Identity: pallet_identity::{Pallet, Call, Storage, Event}, + IdentityMigrator: identity_migrator::{Pallet, Call, Event}, } ); @@ -274,6 +279,28 @@ impl crowdloan::Config for Test { type WeightInfo = crate::crowdloan::TestWeightInfo; } +impl pallet_identity::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type Slashed = (); + type BasicDeposit = ConstU32<100>; + type ByteDeposit = ConstU32<10>; + type SubAccountDeposit = ConstU32<100>; + type MaxSubAccounts = ConstU32<2>; + type IdentityInformation = IdentityInfo>; + type MaxRegistrars = ConstU32<20>; + type RegistrarOrigin = EnsureRoot; + type ForceOrigin = EnsureRoot; + type WeightInfo = (); +} + +impl identity_migrator::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Reaper = EnsureRoot; + type ReapIdentityHandler = (); + type WeightInfo = crate::identity_migrator::TestWeightInfo; +} + /// Create a new set of test externalities. pub fn new_test_ext() -> TestExternalities { let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); diff --git a/polkadot/runtime/common/src/lib.rs b/polkadot/runtime/common/src/lib.rs index 70722d50988..bd49d3cccc9 100644 --- a/polkadot/runtime/common/src/lib.rs +++ b/polkadot/runtime/common/src/lib.rs @@ -23,6 +23,7 @@ pub mod auctions; pub mod claims; pub mod crowdloan; pub mod elections; +pub mod identity_migrator; pub mod impls; pub mod paras_registrar; pub mod paras_sudo_wrapper; diff --git a/polkadot/runtime/rococo/src/impls.rs b/polkadot/runtime/rococo/src/impls.rs new file mode 100644 index 00000000000..71b1091eeb6 --- /dev/null +++ b/polkadot/runtime/rococo/src/impls.rs @@ -0,0 +1,158 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
+ +use crate::xcm_config; +use frame_support::pallet_prelude::DispatchResult; +use frame_system::RawOrigin; +use parity_scale_codec::{Decode, Encode}; +use primitives::Balance; +use rococo_runtime_constants::currency::*; +use runtime_common::identity_migrator::{OnReapIdentity, WeightInfo}; +use sp_std::{marker::PhantomData, prelude::*}; +use xcm::{latest::prelude::*, VersionedMultiLocation, VersionedXcm}; +use xcm_executor::traits::TransactAsset; + +/// A type containing the encoding of the People Chain pallets in its runtime. Used to construct any +/// remote calls. The codec index must correspond to the index of `IdentityMigrator` in the +/// `construct_runtime` of the remote chain. +#[derive(Encode, Decode)] +enum PeopleRuntimePallets { + #[codec(index = 248)] + IdentityMigrator(IdentityMigratorCalls), +} + +/// Call encoding for the calls needed from the Identity Migrator pallet. +#[derive(Encode, Decode)] +enum IdentityMigratorCalls { + #[codec(index = 1)] + PokeDeposit(AccountId), +} + +/// Type that implements `OnReapIdentity` that will send the deposit needed to store the same +/// information on a parachain, sends the deposit there, and then updates it. +pub struct ToParachainIdentityReaper(PhantomData<(Runtime, AccountId)>); +impl ToParachainIdentityReaper { + /// Calculate the balance needed on the remote chain based on the `IdentityInfo` and `Subs` on + /// this chain. The total includes: + /// + /// - Identity basic deposit + /// - `IdentityInfo` byte deposit + /// - Sub accounts deposit + /// - 2x existential deposit (1 for account existence, 1 such that the user can transact) + fn calculate_remote_deposit(bytes: u32, subs: u32) -> Balance { + // Remote deposit constants. Parachain uses `deposit / 100` + // Source: + // https://github.com/paritytech/polkadot-sdk/blob/a146918/cumulus/parachains/common/src/rococo.rs#L29 + // + // Parachain Deposit Configuration: + // + // pub const BasicDeposit: Balance = deposit(1, 17); + // pub const ByteDeposit: Balance = deposit(0, 1); + // pub const SubAccountDeposit: Balance = deposit(1, 53); + // pub const EXISTENTIAL_DEPOSIT: Balance = constants::currency::EXISTENTIAL_DEPOSIT / 10; + let para_basic_deposit = deposit(1, 17) / 100; + let para_byte_deposit = deposit(0, 1) / 100; + let para_sub_account_deposit = deposit(1, 53) / 100; + let para_existential_deposit = EXISTENTIAL_DEPOSIT / 10; + + // pallet deposits + let id_deposit = + para_basic_deposit.saturating_add(para_byte_deposit.saturating_mul(bytes as Balance)); + let subs_deposit = para_sub_account_deposit.saturating_mul(subs as Balance); + + id_deposit + .saturating_add(subs_deposit) + .saturating_add(para_existential_deposit.saturating_mul(2)) + } +} + +impl OnReapIdentity for ToParachainIdentityReaper +where + Runtime: frame_system::Config + pallet_xcm::Config, + AccountId: Into<[u8; 32]> + Clone + Encode, +{ + fn on_reap_identity(who: &AccountId, fields: u32, subs: u32) -> DispatchResult { + use crate::{ + impls::IdentityMigratorCalls::PokeDeposit, + weights::runtime_common_identity_migrator::WeightInfo as MigratorWeights, + }; + + let total_to_send = Self::calculate_remote_deposit(fields, subs); + + // define asset / destination from relay perspective + let roc = MultiAsset { id: Concrete(Here.into_location()), fun: Fungible(total_to_send) }; + // People Chain: ParaId 1004 + let destination: MultiLocation = MultiLocation::new(0, Parachain(1004)); + + // Do `check_out` accounting since the XCM Executor's `InitiateTeleport` doesn't support + // unpaid teleports. 
+ + // check out + xcm_config::LocalAssetTransactor::can_check_out( + &destination, + &roc, + // not used in AssetTransactor + &XcmContext { origin: None, message_id: [0; 32], topic: None }, + ) + .map_err(|_| pallet_xcm::Error::::CannotCheckOutTeleport)?; + xcm_config::LocalAssetTransactor::check_out( + &destination, + &roc, + // not used in AssetTransactor + &XcmContext { origin: None, message_id: [0; 32], topic: None }, + ); + + // reanchor + let roc_reanchored: MultiAssets = vec![MultiAsset { + id: Concrete(MultiLocation::new(1, Here)), + fun: Fungible(total_to_send), + }] + .into(); + + let poke = PeopleRuntimePallets::::IdentityMigrator(PokeDeposit(who.clone())); + let remote_weight_limit = MigratorWeights::::poke_deposit().saturating_mul(2); + + // Actual program to execute on People Chain. + let program: Xcm<()> = Xcm(vec![ + // Unpaid as this is constructed by the system, once per user. The user shouldn't have + // their balance reduced by teleport fees for the favor of migrating. + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + // Receive the asset into holding. + ReceiveTeleportedAsset(roc_reanchored), + // Deposit into the user's account. + DepositAsset { + assets: Wild(AllCounted(1)), + beneficiary: Junction::AccountId32 { network: None, id: who.clone().into() } + .into_location() + .into(), + }, + // Poke the deposit to reserve the appropriate amount on the parachain. + Transact { + origin_kind: OriginKind::Superuser, + require_weight_at_most: remote_weight_limit, + call: poke.encode().into(), + }, + ]); + + // send + let _ = >::send( + RawOrigin::Root.into(), + Box::new(VersionedMultiLocation::V3(destination)), + Box::new(VersionedXcm::V3(program)), + )?; + Ok(()) + } +} diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 5a1e170862e..277c9981dab 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -31,7 +31,7 @@ use primitives::{ ValidatorIndex, PARACHAIN_KEY_TYPE_ID, }; use runtime_common::{ - assigned_slots, auctions, claims, crowdloan, impl_runtime_weights, + assigned_slots, auctions, claims, crowdloan, identity_migrator, impl_runtime_weights, impls::{ LocatableAssetConverter, ToAuthor, VersionedLocatableAsset, VersionedMultiLocationConverter, }, @@ -68,9 +68,9 @@ use frame_support::{ genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ - fungible::HoldConsideration, EitherOf, EitherOfDiverse, Everything, InstanceFilter, - KeyOwnerProofSystem, LinearStoragePrice, PrivilegeCmp, ProcessMessage, ProcessMessageError, - StorageMapShim, WithdrawReasons, + fungible::HoldConsideration, Contains, EitherOf, EitherOfDiverse, EverythingBut, + InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, PrivilegeCmp, ProcessMessage, + ProcessMessageError, StorageMapShim, WithdrawReasons, }, weights::{ConstantMultiplier, WeightMeter}, PalletId, @@ -114,6 +114,10 @@ mod weights; // XCM configurations. pub mod xcm_config; +// Implemented types. +mod impls; +use impls::ToParachainIdentityReaper; + // Governance and configurations. pub mod governance; use governance::{ @@ -166,13 +170,24 @@ pub fn native_version() -> NativeVersion { NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } } +/// A type to identify calls to the Identity pallet. These will be filtered to prevent invocation, +/// locking the state of the pallet and preventing further updates to identities and sub-identities. 
+/// The locked state will be the genesis state of a new system chain and then removed from the Relay +/// Chain. +pub struct IsIdentityCall; +impl Contains for IsIdentityCall { + fn contains(c: &RuntimeCall) -> bool { + matches!(c, RuntimeCall::Identity(_)) + } +} + parameter_types! { pub const Version: RuntimeVersion = VERSION; pub const SS58Prefix: u8 = 42; } impl frame_system::Config for Runtime { - type BaseCallFilter = Everything; + type BaseCallFilter = EverythingBut; type BlockWeights = BlockWeights; type BlockLength = BlockLength; type DbWeight = RocksDbWeight; @@ -1079,6 +1094,14 @@ impl auctions::Config for Runtime { type WeightInfo = weights::runtime_common_auctions::WeightInfo; } +impl identity_migrator::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + // To be changed to `EnsureSigned` once there is a People Chain to migrate to. + type Reaper = EnsureRoot; + type ReapIdentityHandler = ToParachainIdentityReaper; + type WeightInfo = weights::runtime_common_identity_migrator::WeightInfo; +} + type NisCounterpartInstance = pallet_balances::Instance2; impl pallet_balances::Config for Runtime { type Balance = Balance; @@ -1340,7 +1363,7 @@ construct_runtime! { // NIS pallet. Nis: pallet_nis::{Pallet, Call, Storage, Event, HoldReason} = 38, -// pub type NisCounterpartInstance = pallet_balances::Instance2; + // pub type NisCounterpartInstance = pallet_balances::Instance2; NisCounterpartBalances: pallet_balances:: = 45, // Parachains pallets. Start indices at 50 to leave room. @@ -1371,6 +1394,9 @@ construct_runtime! { // Pallet for sending XCM. XcmPallet: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 99, + // Pallet for migrating Identity to a parachain. To be removed post-migration. + IdentityMigrator: identity_migrator::{Pallet, Call, Event} = 248, + ParasSudoWrapper: paras_sudo_wrapper::{Pallet, Call} = 250, AssignedSlots: assigned_slots::{Pallet, Call, Storage, Event, Config} = 251, @@ -1551,6 +1577,7 @@ mod benches { [runtime_common::auctions, Auctions] [runtime_common::crowdloan, Crowdloan] [runtime_common::claims, Claims] + [runtime_common::identity_migrator, IdentityMigrator] [runtime_common::slots, Slots] [runtime_common::paras_registrar, Registrar] [runtime_parachains::configuration, Configuration] diff --git a/polkadot/runtime/rococo/src/weights/mod.rs b/polkadot/runtime/rococo/src/weights/mod.rs index 9c563a67d98..bd2079ce827 100644 --- a/polkadot/runtime/rococo/src/weights/mod.rs +++ b/polkadot/runtime/rococo/src/weights/mod.rs @@ -46,6 +46,7 @@ pub mod runtime_common_assigned_slots; pub mod runtime_common_auctions; pub mod runtime_common_claims; pub mod runtime_common_crowdloan; +pub mod runtime_common_identity_migrator; pub mod runtime_common_paras_registrar; pub mod runtime_common_slots; pub mod runtime_parachains_assigner_on_demand; diff --git a/polkadot/runtime/rococo/src/weights/runtime_common_identity_migrator.rs b/polkadot/runtime/rococo/src/weights/runtime_common_identity_migrator.rs new file mode 100644 index 00000000000..cec357453b6 --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/runtime_common_identity_migrator.rs @@ -0,0 +1,97 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `runtime_common::identity_migrator` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-11-07, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `sbtb`, CPU: `13th Gen Intel(R) Core(TM) i7-1365U` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot +// benchmark +// pallet +// --chain=rococo-dev +// --steps=2 +// --repeat=1 +// --pallet=runtime_common::identity_migrator +// --extrinsic=* +// --output=./migrator-release.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `runtime_common::identity_migrator`. +pub struct WeightInfo(PhantomData); +impl runtime_common::identity_migrator::WeightInfo for WeightInfo { + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 20]`. + /// The range of component `s` is `[0, 100]`. + fn reap_identity(r: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `7292 + r * (8 ±0) + s * (32 ±0)` + // Estimated: `11003 + r * (8 ±0) + s * (33 ±0)` + // Minimum execution time: 163_756_000 picoseconds. 
+ Weight::from_parts(158_982_500, 0) + .saturating_add(Weight::from_parts(0, 11003)) + // Standard Error: 1_143_629 + .saturating_add(Weight::from_parts(238_675, 0).saturating_mul(r.into())) + // Standard Error: 228_725 + .saturating_add(Weight::from_parts(1_529_645, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(5)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) + .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 33).saturating_mul(s.into())) + } + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + fn poke_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `7229` + // Estimated: `11003` + // Minimum execution time: 137_570_000 picoseconds. + Weight::from_parts(137_570_000, 0) + .saturating_add(Weight::from_parts(0, 11003)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/polkadot/runtime/westend/src/impls.rs b/polkadot/runtime/westend/src/impls.rs new file mode 100644 index 00000000000..80105594965 --- /dev/null +++ b/polkadot/runtime/westend/src/impls.rs @@ -0,0 +1,158 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::xcm_config; +use frame_support::pallet_prelude::DispatchResult; +use frame_system::RawOrigin; +use parity_scale_codec::{Decode, Encode}; +use primitives::Balance; +use runtime_common::identity_migrator::{OnReapIdentity, WeightInfo}; +use sp_std::{marker::PhantomData, prelude::*}; +use westend_runtime_constants::currency::*; +use xcm::{latest::prelude::*, VersionedMultiLocation, VersionedXcm}; +use xcm_executor::traits::TransactAsset; + +/// A type containing the encoding of the People Chain pallets in its runtime. Used to construct any +/// remote calls. The codec index must correspond to the index of `IdentityMigrator` in the +/// `construct_runtime` of the remote chain. +#[derive(Encode, Decode)] +enum PeopleRuntimePallets { + #[codec(index = 248)] + IdentityMigrator(IdentityMigratorCalls), +} + +/// Call encoding for the calls needed from the Identity Migrator pallet. +#[derive(Encode, Decode)] +enum IdentityMigratorCalls { + #[codec(index = 1)] + PokeDeposit(AccountId), +} + +/// Type that implements `OnReapIdentity` that will send the deposit needed to store the same +/// information on a parachain, sends the deposit there, and then updates it. 
+pub struct ToParachainIdentityReaper(PhantomData<(Runtime, AccountId)>); +impl ToParachainIdentityReaper { + /// Calculate the balance needed on the remote chain based on the `IdentityInfo` and `Subs` on + /// this chain. The total includes: + /// + /// - Identity basic deposit + /// - `IdentityInfo` byte deposit + /// - Sub accounts deposit + /// - 2x existential deposit (1 for account existence, 1 such that the user can transact) + fn calculate_remote_deposit(bytes: u32, subs: u32) -> Balance { + // Remote deposit constants. Parachain uses `deposit / 100` + // Source: + // https://github.com/paritytech/polkadot-sdk/blob/a146918/cumulus/parachains/common/src/westend.rs#L28 + // + // Parachain Deposit Configuration: + // + // pub const BasicDeposit: Balance = deposit(1, 17); + // pub const ByteDeposit: Balance = deposit(0, 1); + // pub const SubAccountDeposit: Balance = deposit(1, 53); + // pub const EXISTENTIAL_DEPOSIT: Balance = constants::currency::EXISTENTIAL_DEPOSIT / 10; + let para_basic_deposit = deposit(1, 17) / 100; + let para_byte_deposit = deposit(0, 1) / 100; + let para_sub_account_deposit = deposit(1, 53) / 100; + let para_existential_deposit = EXISTENTIAL_DEPOSIT / 10; + + // pallet deposits + let id_deposit = + para_basic_deposit.saturating_add(para_byte_deposit.saturating_mul(bytes as Balance)); + let subs_deposit = para_sub_account_deposit.saturating_mul(subs as Balance); + + id_deposit + .saturating_add(subs_deposit) + .saturating_add(para_existential_deposit.saturating_mul(2)) + } +} + +impl OnReapIdentity for ToParachainIdentityReaper +where + Runtime: frame_system::Config + pallet_xcm::Config, + AccountId: Into<[u8; 32]> + Clone + Encode, +{ + fn on_reap_identity(who: &AccountId, fields: u32, subs: u32) -> DispatchResult { + use crate::{ + impls::IdentityMigratorCalls::PokeDeposit, + weights::runtime_common_identity_migrator::WeightInfo as MigratorWeights, + }; + + let total_to_send = Self::calculate_remote_deposit(fields, subs); + + // define asset / destination from relay perspective + let wnd = MultiAsset { id: Concrete(Here.into_location()), fun: Fungible(total_to_send) }; + // People Chain: ParaId 1004 + let destination: MultiLocation = MultiLocation::new(0, Parachain(1004)); + + // Do `check_out` accounting since the XCM Executor's `InitiateTeleport` doesn't support + // unpaid teleports. + + // check out + xcm_config::LocalAssetTransactor::can_check_out( + &destination, + &wnd, + // not used in AssetTransactor + &XcmContext { origin: None, message_id: [0; 32], topic: None }, + ) + .map_err(|_| pallet_xcm::Error::::CannotCheckOutTeleport)?; + xcm_config::LocalAssetTransactor::check_out( + &destination, + &wnd, + // not used in AssetTransactor + &XcmContext { origin: None, message_id: [0; 32], topic: None }, + ); + + // reanchor + let wnd_reanchored: MultiAssets = vec![MultiAsset { + id: Concrete(MultiLocation::new(1, Here)), + fun: Fungible(total_to_send), + }] + .into(); + + let poke = PeopleRuntimePallets::::IdentityMigrator(PokeDeposit(who.clone())); + let remote_weight_limit = MigratorWeights::::poke_deposit().saturating_mul(2); + + // Actual program to execute on People Chain. + let program: Xcm<()> = Xcm(vec![ + // Unpaid as this is constructed by the system, once per user. The user shouldn't have + // their balance reduced by teleport fees for the favor of migrating. + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + // Receive the asset into holding. + ReceiveTeleportedAsset(wnd_reanchored), + // Deposit into the user's account. 
+ DepositAsset { + assets: Wild(AllCounted(1)), + beneficiary: Junction::AccountId32 { network: None, id: who.clone().into() } + .into_location() + .into(), + }, + // Poke the deposit to reserve the appropriate amount on the parachain. + Transact { + origin_kind: OriginKind::Superuser, + require_weight_at_most: remote_weight_limit, + call: poke.encode().into(), + }, + ]); + + // send + let _ = >::send( + RawOrigin::Root.into(), + Box::new(VersionedMultiLocation::V3(destination)), + Box::new(VersionedXcm::V3(program)), + )?; + Ok(()) + } +} diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index d3862aff257..2e8394b0ee4 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -31,7 +31,7 @@ use frame_support::{ genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ - fungible::HoldConsideration, ConstU32, EitherOf, EitherOfDiverse, Everything, + fungible::HoldConsideration, ConstU32, Contains, EitherOf, EitherOfDiverse, EverythingBut, InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, ProcessMessage, ProcessMessageError, WithdrawReasons, }, @@ -56,7 +56,7 @@ use primitives::{ use runtime_common::{ assigned_slots, auctions, crowdloan, elections::OnChainAccuracy, - impl_runtime_weights, + identity_migrator, impl_runtime_weights, impls::{ LocatableAssetConverter, ToAuthor, VersionedLocatableAsset, VersionedMultiLocationConverter, }, @@ -119,6 +119,10 @@ mod bag_thresholds; mod weights; pub mod xcm_config; +// Implemented types. +mod impls; +use impls::ToParachainIdentityReaper; + // Governance and configurations. pub mod governance; use governance::{ @@ -161,13 +165,24 @@ pub fn native_version() -> NativeVersion { NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } } +/// A type to identify calls to the Identity pallet. These will be filtered to prevent invocation, +/// locking the state of the pallet and preventing further updates to identities and sub-identities. +/// The locked state will be the genesis state of a new system chain and then removed from the Relay +/// Chain. +pub struct IsIdentityCall; +impl Contains for IsIdentityCall { + fn contains(c: &RuntimeCall) -> bool { + matches!(c, RuntimeCall::Identity(_)) + } +} + parameter_types! { pub const Version: RuntimeVersion = VERSION; pub const SS58Prefix: u8 = 42; } impl frame_system::Config for Runtime { - type BaseCallFilter = Everything; + type BaseCallFilter = EverythingBut; type BlockWeights = BlockWeights; type BlockLength = BlockLength; type RuntimeOrigin = RuntimeOrigin; @@ -1328,6 +1343,14 @@ impl auctions::Config for Runtime { type WeightInfo = weights::runtime_common_auctions::WeightInfo; } +impl identity_migrator::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + // To be changed to `EnsureSigned` once there is a People Chain to migrate to. + type Reaper = EnsureRoot; + type ReapIdentityHandler = ToParachainIdentityReaper; + type WeightInfo = weights::runtime_common_identity_migrator::WeightInfo; +} + parameter_types! { pub const PoolsPalletId: PalletId = PalletId(*b"py/nopls"); pub const MaxPointsToBalance: u8 = 10; @@ -1491,6 +1514,9 @@ construct_runtime! { // Root testing pallet. RootTesting: pallet_root_testing::{Pallet, Call, Storage, Event} = 102, + + // Pallet for migrating Identity to a parachain. To be removed post-migration. 
+ IdentityMigrator: identity_migrator::{Pallet, Call, Event} = 248, } } @@ -1587,6 +1613,7 @@ mod benches { [runtime_common::assigned_slots, AssignedSlots] [runtime_common::auctions, Auctions] [runtime_common::crowdloan, Crowdloan] + [runtime_common::identity_migrator, IdentityMigrator] [runtime_common::paras_registrar, Registrar] [runtime_common::slots, Slots] [runtime_parachains::configuration, Configuration] diff --git a/polkadot/runtime/westend/src/weights/mod.rs b/polkadot/runtime/westend/src/weights/mod.rs index 9ae6798d70b..3841579088a 100644 --- a/polkadot/runtime/westend/src/weights/mod.rs +++ b/polkadot/runtime/westend/src/weights/mod.rs @@ -46,6 +46,7 @@ pub mod pallet_xcm; pub mod runtime_common_assigned_slots; pub mod runtime_common_auctions; pub mod runtime_common_crowdloan; +pub mod runtime_common_identity_migrator; pub mod runtime_common_paras_registrar; pub mod runtime_common_slots; pub mod runtime_parachains_configuration; diff --git a/polkadot/runtime/westend/src/weights/runtime_common_identity_migrator.rs b/polkadot/runtime/westend/src/weights/runtime_common_identity_migrator.rs new file mode 100644 index 00000000000..cec357453b6 --- /dev/null +++ b/polkadot/runtime/westend/src/weights/runtime_common_identity_migrator.rs @@ -0,0 +1,97 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `runtime_common::identity_migrator` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-11-07, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `sbtb`, CPU: `13th Gen Intel(R) Core(TM) i7-1365U` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot +// benchmark +// pallet +// --chain=rococo-dev +// --steps=2 +// --repeat=1 +// --pallet=runtime_common::identity_migrator +// --extrinsic=* +// --output=./migrator-release.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `runtime_common::identity_migrator`. 
+pub struct WeightInfo(PhantomData); +impl runtime_common::identity_migrator::WeightInfo for WeightInfo { + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 20]`. + /// The range of component `s` is `[0, 100]`. + fn reap_identity(r: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `7292 + r * (8 ±0) + s * (32 ±0)` + // Estimated: `11003 + r * (8 ±0) + s * (33 ±0)` + // Minimum execution time: 163_756_000 picoseconds. + Weight::from_parts(158_982_500, 0) + .saturating_add(Weight::from_parts(0, 11003)) + // Standard Error: 1_143_629 + .saturating_add(Weight::from_parts(238_675, 0).saturating_mul(r.into())) + // Standard Error: 228_725 + .saturating_add(Weight::from_parts(1_529_645, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(5)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) + .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 33).saturating_mul(s.into())) + } + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + fn poke_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `7229` + // Estimated: `11003` + // Minimum execution time: 137_570_000 picoseconds. 
+ Weight::from_parts(137_570_000, 0) + .saturating_add(Weight::from_parts(0, 11003)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/substrate/frame/identity/src/lib.rs b/substrate/frame/identity/src/lib.rs index 264ea3ddb41..133f9eeb4be 100644 --- a/substrate/frame/identity/src/lib.rs +++ b/substrate/frame/identity/src/lib.rs @@ -79,8 +79,10 @@ mod tests; mod types; pub mod weights; +use codec::Encode; use frame_support::{ - pallet_prelude::DispatchResult, + ensure, + pallet_prelude::{DispatchError, DispatchResult}, traits::{BalanceStatus, Currency, Get, OnUnbalanced, ReservableCurrency}, }; use sp_runtime::traits::{AppendZerosInput, Hash, Saturating, StaticLookup, Zero}; @@ -395,8 +397,7 @@ pub mod pallet { ); let (old_deposit, old_ids) = >::get(&sender); - let new_deposit = - T::SubAccountDeposit::get().saturating_mul(>::from(subs.len() as u32)); + let new_deposit = Self::subs_deposit(subs.len() as u32); let not_other_sub = subs.iter().filter_map(|i| SuperOf::::get(&i.0)).all(|i| i.0 == sender); @@ -898,6 +899,26 @@ impl Pallet { .collect() } + /// Calculate the deposit required for a number of `sub` accounts. + fn subs_deposit(subs: u32) -> BalanceOf { + T::SubAccountDeposit::get().saturating_mul(>::from(subs)) + } + + /// Take the `current` deposit that `who` is holding, and update it to a `new` one. + fn rejig_deposit( + who: &T::AccountId, + current: BalanceOf, + new: BalanceOf, + ) -> DispatchResult { + if new > current { + T::Currency::reserve(who, new - current)?; + } else if new < current { + let err_amount = T::Currency::unreserve(who, current - new); + debug_assert!(err_amount.is_zero()); + } + Ok(()) + } + /// Check if the account has corresponding identity information by the identity field. pub fn has_identity( who: &T::AccountId, @@ -906,4 +927,110 @@ impl Pallet { IdentityOf::::get(who) .map_or(false, |registration| (registration.info.has_identity(fields))) } + + /// Reap an identity, clearing associated storage items and refunding any deposits. This + /// function is very similar to (a) `clear_identity`, but called on a `target` account instead + /// of self; and (b) `kill_identity`, but without imposing a slash. + /// + /// Parameters: + /// - `target`: The account for which to reap identity state. + /// + /// Return type is a tuple of the number of registrars, `IdentityInfo` bytes, and sub accounts, + /// respectively. + /// + /// NOTE: This function is here temporarily for migration of Identity info from the Polkadot + /// Relay Chain into a system parachain. It will be removed after the migration. + pub fn reap_identity(who: &T::AccountId) -> Result<(u32, u32, u32), DispatchError> { + // `take` any storage items keyed by `target` + // identity + let id = >::take(&who).ok_or(Error::::NotNamed)?; + let registrars = id.judgements.len() as u32; + let encoded_byte_size = id.info.encoded_size() as u32; + + // subs + let (subs_deposit, sub_ids) = >::take(&who); + let actual_subs = sub_ids.len() as u32; + for sub in sub_ids.iter() { + >::remove(sub); + } + + // unreserve any deposits + let deposit = id.total_deposit().saturating_add(subs_deposit); + let err_amount = T::Currency::unreserve(&who, deposit); + debug_assert!(err_amount.is_zero()); + Ok((registrars, encoded_byte_size, actual_subs)) + } + + /// Update the deposits held by `target` for its identity info. + /// + /// Parameters: + /// - `target`: The account for which to update deposits. 
+ /// + /// Return type is a tuple of the new Identity and Subs deposits, respectively. + /// + /// NOTE: This function is here temporarily for migration of Identity info from the Polkadot + /// Relay Chain into a system parachain. It will be removed after the migration. + pub fn poke_deposit( + target: &T::AccountId, + ) -> Result<(BalanceOf, BalanceOf), DispatchError> { + // Identity Deposit + let new_id_deposit = IdentityOf::::try_mutate( + &target, + |registration| -> Result, DispatchError> { + let reg = registration.as_mut().ok_or(Error::::NoIdentity)?; + // Calculate what deposit should be + let encoded_byte_size = reg.info.encoded_size() as u32; + let byte_deposit = + T::ByteDeposit::get().saturating_mul(>::from(encoded_byte_size)); + let new_id_deposit = T::BasicDeposit::get().saturating_add(byte_deposit); + + // Update account + Self::rejig_deposit(&target, reg.deposit, new_id_deposit)?; + + reg.deposit = new_id_deposit; + Ok(new_id_deposit) + }, + )?; + + // Subs Deposit + let new_subs_deposit = SubsOf::::try_mutate( + &target, + |(current_subs_deposit, subs_of)| -> Result, DispatchError> { + let new_subs_deposit = Self::subs_deposit(subs_of.len() as u32); + Self::rejig_deposit(&target, *current_subs_deposit, new_subs_deposit)?; + *current_subs_deposit = new_subs_deposit; + Ok(new_subs_deposit) + }, + )?; + Ok((new_id_deposit, new_subs_deposit)) + } + + /// Set an identity with zero deposit. Only used for benchmarking that involves `rejig_deposit`. + #[cfg(feature = "runtime-benchmarks")] + pub fn set_identity_no_deposit( + who: &T::AccountId, + info: T::IdentityInformation, + ) -> DispatchResult { + IdentityOf::::insert( + &who, + Registration { + judgements: Default::default(), + deposit: Zero::zero(), + info: info.clone(), + }, + ); + Ok(()) + } + + /// Set subs with zero deposit. Only used for benchmarking that involves `rejig_deposit`. 
+ #[cfg(feature = "runtime-benchmarks")] + pub fn set_sub_no_deposit(who: &T::AccountId, sub: T::AccountId) -> DispatchResult { + use frame_support::BoundedVec; + let subs = BoundedVec::<_, T::MaxSubAccounts>::try_from(vec![sub]).unwrap(); + SubsOf::::insert::< + &T::AccountId, + (BalanceOf, BoundedVec), + >(&who, (Zero::zero(), subs)); + Ok(()) + } } diff --git a/substrate/frame/identity/src/tests.rs b/substrate/frame/identity/src/tests.rs index 71192ea65a8..78074df933a 100644 --- a/substrate/frame/identity/src/tests.rs +++ b/substrate/frame/identity/src/tests.rs @@ -712,3 +712,70 @@ fn test_has_identity() { )); }); } + +#[test] +fn reap_identity_works() { + new_test_ext().execute_with(|| { + let ten_info = ten(); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten_info.clone()))); + assert_ok!(Identity::set_subs( + RuntimeOrigin::signed(10), + vec![(20, Data::Raw(vec![40; 1].try_into().unwrap()))] + )); + // deposit is correct + let id_deposit = id_deposit(&ten_info); + let subs_deposit: u64 = <::SubAccountDeposit as Get>::get(); + assert_eq!(Balances::free_balance(10), 1000 - id_deposit - subs_deposit); + // reap + assert_ok!(Identity::reap_identity(&10)); + // no identity or subs + assert!(Identity::identity(10).is_none()); + assert!(Identity::super_of(20).is_none()); + // balance is unreserved + assert_eq!(Balances::free_balance(10), 1000); + }); +} + +#[test] +fn poke_deposit_works() { + new_test_ext().execute_with(|| { + let ten_info = ten(); + // Set a custom registration with 0 deposit + IdentityOf::::insert( + &10, + Registration { + judgements: BoundedVec::default(), + deposit: Zero::zero(), + info: ten_info.clone(), + }, + ); + assert!(Identity::identity(10).is_some()); + // Set a sub with zero deposit + SubsOf::::insert::<&u64, (u64, BoundedVec>)>( + &10, + (0, vec![20].try_into().unwrap()), + ); + SuperOf::::insert(&20, (&10, Data::Raw(vec![1; 1].try_into().unwrap()))); + // Balance is free + assert_eq!(Balances::free_balance(10), 1000); + + // poke + assert_ok!(Identity::poke_deposit(&10)); + + // free balance reduced correctly + let id_deposit = id_deposit(&ten_info); + let subs_deposit: u64 = <::SubAccountDeposit as Get>::get(); + assert_eq!(Balances::free_balance(10), 1000 - id_deposit - subs_deposit); + // new registration deposit is 10 + assert_eq!( + Identity::identity(&10), + Some(Registration { + judgements: BoundedVec::default(), + deposit: id_deposit, + info: ten() + }) + ); + // new subs deposit is 10 vvvvvvvvvvvv + assert_eq!(Identity::subs_of(10), (subs_deposit, vec![20].try_into().unwrap())); + }); +} -- GitLab From 0226b55f9ffff28821458a71c84eb5d3e75f1794 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=B3nal=20Murray?= Date: Wed, 15 Nov 2023 15:01:55 +0000 Subject: [PATCH 41/74] Add `collectives-westend` and `glutton-westend` runtimes (#2024) Add collectives and glutton parachain westend runtimes to prepare for #1737. The removal of system parachain native runtimes #1737 is blocked until chainspecs and runtime APIs can be dealt with cleanly (merge of #1256 and follow up PRs). In the meantime, these additions are ready to be merged to `master`, so I have separated them out into this PR. Also marked `bridge-hub-westend` as unimplemented in line with [this issue](https://github.com/paritytech/parity-bridges-common/issues/2602). 
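For reference, a minimal sketch of how node-side code (such as the chain specs added under `cumulus/polkadot-parachain/src/chain_spec/`) can pick up the new runtime crates: each runtime is compiled by `substrate-wasm-builder` via the added `build.rs` files, and, assuming the usual `include!` of the generated `wasm_binary.rs` in the runtime's `lib.rs`, the compiled code is exposed as `WASM_BINARY` behind the `std` feature. The helper name below is illustrative, not part of this PR:

```rust
// Illustrative only: fetch the collectives-westend runtime code, e.g. when
// building a development chain spec. `WASM_BINARY` is `None` unless the
// runtime crate was built with its `std` feature enabled.
fn collectives_westend_code() -> &'static [u8] {
    collectives_westend_runtime::WASM_BINARY
        .expect("collectives-westend runtime wasm not available; build with the `std` feature")
}
```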
TODO - [x] add to `command-bot` benchmarks - [x] add to `command-bot-scripts` benchmarks - [x] generate weights --------- Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> Co-authored-by: Muharem Co-authored-by: command-bot <> Co-authored-by: Oliver Tale-Yazdi Co-authored-by: Branislav Kontur --- .../build-and-attach-release-runtimes.yml | 2 + .gitlab/pipeline/check.yml | 13 + .gitlab/pipeline/short-benchmarks.yml | 10 + Cargo.lock | 134 +++ Cargo.toml | 2 + .../collectives-westend/Cargo.toml | 230 ++++ .../collectives/collectives-westend/build.rs | 26 + .../collectives-westend/src/ambassador/mod.rs | 262 +++++ .../src/ambassador/origins.rs | 135 +++ .../src/ambassador/tracks.rs | 282 +++++ .../collectives-westend/src/fellowship/mod.rs | 238 ++++ .../src/fellowship/origins.rs | 247 ++++ .../src/fellowship/tracks.rs | 532 +++++++++ .../collectives-westend/src/impls.rs | 229 ++++ .../collectives-westend/src/lib.rs | 1023 +++++++++++++++++ .../src/weights/block_weights.rs | 53 + .../src/weights/cumulus_pallet_dmp_queue.rs | 131 +++ .../cumulus_pallet_parachain_system.rs | 80 ++ .../src/weights/cumulus_pallet_xcmp_queue.rs | 148 +++ .../src/weights/extrinsic_weights.rs | 53 + .../src/weights/frame_system.rs | 154 +++ .../collectives-westend/src/weights/mod.rs | 50 + .../src/weights/pallet_alliance.rs | 494 ++++++++ .../src/weights/pallet_balances.rs | 152 +++ .../src/weights/pallet_collator_selection.rs | 246 ++++ .../src/weights/pallet_collective.rs | 304 +++++ .../src/weights/pallet_collective_content.rs | 93 ++ .../pallet_core_fellowship_ambassador_core.rs | 223 ++++ .../pallet_core_fellowship_fellowship_core.rs | 222 ++++ .../src/weights/pallet_message_queue.rs | 179 +++ .../src/weights/pallet_multisig.rs | 164 +++ .../src/weights/pallet_preimage.rs | 232 ++++ .../src/weights/pallet_proxy.rs | 225 ++++ ...ranked_collective_ambassador_collective.rs | 177 +++ ...ranked_collective_fellowship_collective.rs | 176 +++ .../pallet_referenda_ambassador_referenda.rs | 536 +++++++++ .../pallet_referenda_fellowship_referenda.rs | 535 +++++++++ .../pallet_salary_ambassador_salary.rs | 190 +++ .../pallet_salary_fellowship_salary.rs | 189 +++ .../src/weights/pallet_scheduler.rs | 206 ++++ .../src/weights/pallet_session.rs | 80 ++ .../src/weights/pallet_timestamp.rs | 74 ++ .../src/weights/pallet_utility.rs | 101 ++ .../src/weights/pallet_xcm.rs | 323 ++++++ .../src/weights/paritydb_weights.rs | 63 + .../src/weights/rocksdb_weights.rs | 63 + .../collectives-westend/src/xcm_config.rs | 364 ++++++ .../glutton/glutton-westend/Cargo.toml | 138 +++ .../runtimes/glutton/glutton-westend/build.rs | 24 + .../glutton/glutton-westend/src/lib.rs | 532 +++++++++ .../cumulus_pallet_parachain_system.rs | 75 ++ .../src/weights/frame_system.rs | 153 +++ .../glutton-westend/src/weights/mod.rs | 19 + .../src/weights/pallet_glutton.rs | 178 +++ .../src/weights/pallet_message_queue.rs | 179 +++ .../src/weights/pallet_timestamp.rs | 73 ++ .../glutton/glutton-westend/src/xcm_config.rs | 92 ++ cumulus/parachains/testnets-common/Cargo.toml | 44 + cumulus/parachains/testnets-common/src/lib.rs | 30 + .../parachains/testnets-common/src/rococo.rs | 119 ++ .../parachains/testnets-common/src/westend.rs | 140 +++ .../parachains/testnets-common/src/wococo.rs | 17 + cumulus/polkadot-parachain/Cargo.toml | 6 + .../src/chain_spec/collectives.rs | 133 +++ .../src/chain_spec/glutton.rs | 69 ++ cumulus/polkadot-parachain/src/command.rs | 101 +- cumulus/polkadot-parachain/src/service.rs | 30 + 
cumulus/scripts/benchmarks.sh | 2 + 68 files changed, 11792 insertions(+), 7 deletions(-) create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/build.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/mod.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/origins.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/tracks.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/origins.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/tracks.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/impls.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/block_weights.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_dmp_queue.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_parachain_system.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_xcmp_queue.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/extrinsic_weights.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_alliance.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_balances.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collator_selection.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collective.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collective_content.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_ambassador_core.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_fellowship_core.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_message_queue.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_multisig.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_preimage.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_proxy.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_ranked_collective_ambassador_collective.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_ranked_collective_fellowship_collective.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_referenda_ambassador_referenda.rs 
create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_referenda_fellowship_referenda.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_salary_ambassador_salary.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_salary_fellowship_salary.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_scheduler.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_session.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_timestamp.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_utility.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/paritydb_weights.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/rocksdb_weights.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs create mode 100644 cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml create mode 100644 cumulus/parachains/runtimes/glutton/glutton-westend/build.rs create mode 100644 cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs create mode 100644 cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/cumulus_pallet_parachain_system.rs create mode 100644 cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system.rs create mode 100644 cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/mod.rs create mode 100644 cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_glutton.rs create mode 100644 cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_message_queue.rs create mode 100644 cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_timestamp.rs create mode 100644 cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs create mode 100644 cumulus/parachains/testnets-common/Cargo.toml create mode 100644 cumulus/parachains/testnets-common/src/lib.rs create mode 100644 cumulus/parachains/testnets-common/src/rococo.rs create mode 100644 cumulus/parachains/testnets-common/src/westend.rs create mode 100644 cumulus/parachains/testnets-common/src/wococo.rs diff --git a/.github/workflows/build-and-attach-release-runtimes.yml b/.github/workflows/build-and-attach-release-runtimes.yml index c7cd4b34384..db0175c6855 100644 --- a/.github/workflows/build-and-attach-release-runtimes.yml +++ b/.github/workflows/build-and-attach-release-runtimes.yml @@ -19,6 +19,8 @@ jobs: - { name: asset-hub-westend, package: asset-hub-westend-runtime, path: cumulus/parachains/runtimes/assets/asset-hub-westend } - { name: bridge-hub-rococo, package: bridge-hub-rococo-runtime, path: cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo } - { name: contracts-rococo, package: contracts-rococo-runtime, path: cumulus/parachains/runtimes/contracts/contracts-rococo } + - { name: collectives-westend, package: collectives-westend-runtime, path: cumulus/parachains/runtimes/collectives/collectives-westend } + - { name: glutton-westend, package: glutton-westend-runtime, path: cumulus/parachains/runtimes/glutton/glutton-westend } build_config: # Release build has logging disabled and no dev 
features - { type: on-chain-release, opts: --features on-chain-release-build } diff --git a/.gitlab/pipeline/check.yml b/.gitlab/pipeline/check.yml index cbb3baf277c..4071fdf9758 100644 --- a/.gitlab/pipeline/check.yml +++ b/.gitlab/pipeline/check.yml @@ -177,6 +177,19 @@ check-runtime-migration-contracts-rococo: WASM: "contracts_rococo_runtime.compact.compressed.wasm" URI: "wss://rococo-contracts-rpc.polkadot.io:443" +# Check runtime migrations for Parity managed collectives chains +check-runtime-migration-collectives-westend: + stage: check + extends: + - .docker-env + - .test-pr-refs + - .check-runtime-migration + variables: + NETWORK: "collectives-westend" + PACKAGE: "collectives-westend-runtime" + WASM: "collectives_westend_runtime.compact.compressed.wasm" + URI: "wss://westend-collectives-rpc.polkadot.io:443" + find-fail-ci-phrase: stage: check variables: diff --git a/.gitlab/pipeline/short-benchmarks.yml b/.gitlab/pipeline/short-benchmarks.yml index 76c75e815ce..0218d3fdac0 100644 --- a/.gitlab/pipeline/short-benchmarks.yml +++ b/.gitlab/pipeline/short-benchmarks.yml @@ -94,7 +94,17 @@ short-benchmark-collectives-polkadot: variables: RUNTIME_CHAIN: collectives-polkadot-dev +short-benchmark-collectives-westend: + <<: *short-bench-cumulus + variables: + RUNTIME_CHAIN: collectives-westend-dev + short-benchmark-glutton-kusama: <<: *short-bench-cumulus variables: RUNTIME_CHAIN: glutton-kusama-dev-1300 + +short-benchmark-glutton-westend: + <<: *short-bench-cumulus + variables: + RUNTIME_CHAIN: glutton-westend-dev-1300 diff --git a/Cargo.lock b/Cargo.lock index b1a46a88f38..6b52e62d742 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3001,6 +3001,80 @@ dependencies = [ "substrate-wasm-builder", ] +[[package]] +name = "collectives-westend-runtime" +version = "1.0.0" +dependencies = [ + "cumulus-pallet-aura-ext", + "cumulus-pallet-dmp-queue", + "cumulus-pallet-parachain-system", + "cumulus-pallet-session-benchmarking", + "cumulus-pallet-xcm", + "cumulus-pallet-xcmp-queue", + "cumulus-primitives-core", + "cumulus-primitives-utility", + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", + "hex-literal", + "log", + "pallet-alliance", + "pallet-aura", + "pallet-authorship", + "pallet-balances", + "pallet-collator-selection", + "pallet-collective", + "pallet-collective-content", + "pallet-core-fellowship", + "pallet-message-queue", + "pallet-multisig", + "pallet-preimage", + "pallet-proxy", + "pallet-ranked-collective", + "pallet-referenda", + "pallet-salary", + "pallet-scheduler", + "pallet-session", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "pallet-utility", + "pallet-xcm", + "parachains-common", + "parity-scale-codec", + "polkadot-core-primitives", + "polkadot-parachain-primitives", + "polkadot-runtime-common", + "scale-info", + "smallvec", + "sp-api", + "sp-arithmetic", + "sp-block-builder", + "sp-consensus-aura", + "sp-core", + "sp-genesis-builder", + "sp-inherents", + "sp-io", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-std 8.0.0", + "sp-storage 13.0.0", + "sp-transaction-pool", + "sp-version", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "substrate-wasm-builder", + "testnets-common", + "westend-runtime-constants", +] + [[package]] name = "color-eyre" version = "0.6.2" @@ -6280,6 +6354,51 @@ dependencies = [ "substrate-wasm-builder", ] +[[package]] +name = 
"glutton-westend-runtime" +version = "1.0.0" +dependencies = [ + "cumulus-pallet-aura-ext", + "cumulus-pallet-parachain-system", + "cumulus-pallet-xcm", + "cumulus-primitives-aura", + "cumulus-primitives-core", + "cumulus-primitives-timestamp", + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", + "pallet-aura", + "pallet-glutton", + "pallet-message-queue", + "pallet-sudo", + "pallet-timestamp", + "parachains-common", + "parity-scale-codec", + "scale-info", + "sp-api", + "sp-block-builder", + "sp-consensus-aura", + "sp-core", + "sp-genesis-builder", + "sp-inherents", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-std 8.0.0", + "sp-storage 13.0.0", + "sp-transaction-pool", + "sp-version", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "substrate-wasm-builder", +] + [[package]] name = "group" version = "0.12.1" @@ -12872,6 +12991,7 @@ dependencies = [ "bridge-hub-westend-runtime", "clap 4.4.6", "collectives-polkadot-runtime", + "collectives-westend-runtime", "color-print", "contracts-rococo-runtime", "cumulus-client-cli", @@ -12889,6 +13009,7 @@ dependencies = [ "frame-benchmarking-cli", "futures", "glutton-runtime", + "glutton-westend-runtime", "hex-literal", "jsonrpsee", "log", @@ -19217,6 +19338,19 @@ dependencies = [ "sp-weights", ] +[[package]] +name = "testnets-common" +version = "1.0.0" +dependencies = [ + "frame-support", + "polkadot-core-primitives", + "rococo-runtime-constants", + "smallvec", + "sp-runtime", + "substrate-wasm-builder", + "westend-runtime-constants", +] + [[package]] name = "textwrap" version = "0.16.0" diff --git a/Cargo.toml b/Cargo.toml index f9779681cae..27351c09581 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -94,8 +94,10 @@ members = [ "cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend", "cumulus/parachains/runtimes/bridge-hubs/test-utils", "cumulus/parachains/runtimes/collectives/collectives-polkadot", + "cumulus/parachains/runtimes/collectives/collectives-westend", "cumulus/parachains/runtimes/contracts/contracts-rococo", "cumulus/parachains/runtimes/glutton/glutton-kusama", + "cumulus/parachains/runtimes/glutton/glutton-westend", "cumulus/parachains/runtimes/starters/seedling", "cumulus/parachains/runtimes/starters/shell", "cumulus/parachains/runtimes/test-utils", diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml new file mode 100644 index 00000000000..94f2de33e90 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml @@ -0,0 +1,230 @@ +[package] +name = "collectives-westend-runtime" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Westend Collectives Parachain Runtime" + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } +hex-literal = { version = "0.4.1" } +log = { version = "0.4.20", default-features = false } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +smallvec = "1.11.0" + +# Substrate +frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} +frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} +frame-support = { path = 
"../../../../../substrate/frame/support", default-features = false} +frame-system = { path = "../../../../../substrate/frame/system", default-features = false} +frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} +frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} +frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} +pallet-alliance = { path = "../../../../../substrate/frame/alliance", default-features = false} +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} +pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false} +pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} +pallet-collective = { path = "../../../../../substrate/frame/collective", default-features = false} +pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false} +pallet-preimage = { path = "../../../../../substrate/frame/preimage", default-features = false } +pallet-proxy = { path = "../../../../../substrate/frame/proxy", default-features = false} +pallet-scheduler = { path = "../../../../../substrate/frame/scheduler", default-features = false } +pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false} +pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false} +pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false} +pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false} +pallet-referenda = { path = "../../../../../substrate/frame/referenda", default-features = false} +pallet-ranked-collective = { path = "../../../../../substrate/frame/ranked-collective", default-features = false} +pallet-core-fellowship = { path = "../../../../../substrate/frame/core-fellowship", default-features = false} +pallet-salary = { path = "../../../../../substrate/frame/salary", default-features = false} +sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} +sp-arithmetic = { path = "../../../../../substrate/primitives/arithmetic", default-features = false } +sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } +sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} +sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} +sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} +sp-storage = { path = "../../../../../substrate/primitives/storage", 
default-features = false} +sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} +sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} + +# Polkadot +pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} +polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false} +polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false} +polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false} +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} +westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/constants", default-features = false} + +# Cumulus +cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } +pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } +cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } +cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false} +cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } +cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } +cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } +cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } +pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } +pallet-collective-content = { path = "../../../pallets/collective-content", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } +parachains-common = { path = "../../../common", default-features = false } +testnets-common = { path = "../../../testnets-common", default-features = false } + +[build-dependencies] +substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } + +[dev-dependencies] +sp-io = { path = "../../../../../substrate/primitives/io", features = [ "std" ]} + +[features] +default = [ "std" ] +runtime-benchmarks = [ + "cumulus-pallet-dmp-queue/runtime-benchmarks", + "cumulus-pallet-parachain-system/runtime-benchmarks", + "cumulus-pallet-session-benchmarking/runtime-benchmarks", + "cumulus-pallet-xcmp-queue/runtime-benchmarks", + "cumulus-primitives-core/runtime-benchmarks", + "cumulus-primitives-utility/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system-benchmarking/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-alliance/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-collator-selection/runtime-benchmarks", + "pallet-collective-content/runtime-benchmarks", + 
"pallet-collective/runtime-benchmarks", + "pallet-core-fellowship/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", + "pallet-multisig/runtime-benchmarks", + "pallet-preimage/runtime-benchmarks", + "pallet-proxy/runtime-benchmarks", + "pallet-ranked-collective/runtime-benchmarks", + "pallet-referenda/runtime-benchmarks", + "pallet-salary/runtime-benchmarks", + "pallet-scheduler/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "pallet-utility/runtime-benchmarks", + "pallet-xcm/runtime-benchmarks", + "parachains-common/runtime-benchmarks", + "polkadot-parachain-primitives/runtime-benchmarks", + "polkadot-runtime-common/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "testnets-common/runtime-benchmarks", + "xcm-builder/runtime-benchmarks", + "xcm-executor/runtime-benchmarks", +] +try-runtime = [ + "cumulus-pallet-aura-ext/try-runtime", + "cumulus-pallet-dmp-queue/try-runtime", + "cumulus-pallet-parachain-system/try-runtime", + "cumulus-pallet-xcm/try-runtime", + "cumulus-pallet-xcmp-queue/try-runtime", + "frame-executive/try-runtime", + "frame-support/try-runtime", + "frame-system/try-runtime", + "frame-try-runtime/try-runtime", + "pallet-alliance/try-runtime", + "pallet-aura/try-runtime", + "pallet-authorship/try-runtime", + "pallet-balances/try-runtime", + "pallet-collator-selection/try-runtime", + "pallet-collective-content/try-runtime", + "pallet-collective/try-runtime", + "pallet-core-fellowship/try-runtime", + "pallet-message-queue/try-runtime", + "pallet-multisig/try-runtime", + "pallet-preimage/try-runtime", + "pallet-proxy/try-runtime", + "pallet-ranked-collective/try-runtime", + "pallet-referenda/try-runtime", + "pallet-salary/try-runtime", + "pallet-scheduler/try-runtime", + "pallet-session/try-runtime", + "pallet-timestamp/try-runtime", + "pallet-transaction-payment/try-runtime", + "pallet-utility/try-runtime", + "pallet-xcm/try-runtime", + "parachain-info/try-runtime", + "polkadot-runtime-common/try-runtime", + "sp-runtime/try-runtime", +] +std = [ + "codec/std", + "cumulus-pallet-aura-ext/std", + "cumulus-pallet-dmp-queue/std", + "cumulus-pallet-parachain-system/std", + "cumulus-pallet-session-benchmarking/std", + "cumulus-pallet-xcm/std", + "cumulus-pallet-xcmp-queue/std", + "cumulus-primitives-core/std", + "cumulus-primitives-utility/std", + "frame-benchmarking?/std", + "frame-executive/std", + "frame-support/std", + "frame-system-benchmarking?/std", + "frame-system-rpc-runtime-api/std", + "frame-system/std", + "frame-try-runtime?/std", + "log/std", + "pallet-alliance/std", + "pallet-aura/std", + "pallet-authorship/std", + "pallet-balances/std", + "pallet-collator-selection/std", + "pallet-collective-content/std", + "pallet-collective/std", + "pallet-core-fellowship/std", + "pallet-message-queue/std", + "pallet-multisig/std", + "pallet-preimage/std", + "pallet-proxy/std", + "pallet-ranked-collective/std", + "pallet-referenda/std", + "pallet-salary/std", + "pallet-scheduler/std", + "pallet-session/std", + "pallet-timestamp/std", + "pallet-transaction-payment-rpc-runtime-api/std", + "pallet-transaction-payment/std", + "pallet-utility/std", + "pallet-xcm/std", + "parachain-info/std", + "parachains-common/std", + "polkadot-core-primitives/std", + "polkadot-parachain-primitives/std", + "polkadot-runtime-common/std", + "scale-info/std", + "sp-api/std", + "sp-arithmetic/std", + "sp-block-builder/std", + "sp-consensus-aura/std", + "sp-core/std", + "sp-genesis-builder/std", + "sp-inherents/std", + "sp-offchain/std", + "sp-runtime/std", + 
"sp-session/std", + "sp-std/std", + "sp-storage/std", + "sp-transaction-pool/std", + "sp-version/std", + "substrate-wasm-builder", + "testnets-common/std", + "westend-runtime-constants/std", + "xcm-builder/std", + "xcm-executor/std", + "xcm/std", +] + +experimental = [ "pallet-aura/experimental" ] diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/build.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/build.rs new file mode 100644 index 00000000000..60f8a125129 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/build.rs @@ -0,0 +1,26 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[cfg(feature = "std")] +fn main() { + substrate_wasm_builder::WasmBuilder::new() + .with_current_project() + .export_heap_base() + .import_memory() + .build() +} + +#[cfg(not(feature = "std"))] +fn main() {} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/mod.rs new file mode 100644 index 00000000000..18c1466bf36 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/mod.rs @@ -0,0 +1,262 @@ +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The Ambassador Program. +//! +//! The module defines the following on-chain functionality of the Ambassador Program: +//! +//! - Managed set of program members, where every member has a [rank](ranks) +//! (via [AmbassadorCollective](pallet_ranked_collective)). +//! - Referendum functionality for the program members to propose, vote on, and execute +//! proposals on behalf of the members of a certain [rank](Origin) +//! (via [AmbassadorReferenda](pallet_referenda)). +//! - Managed content (charter, announcements) (via [pallet_collective_content]). +//! - Promotion and demotion periods, register of members' activity, and rank based salaries +//! (via [AmbassadorCore](pallet_core_fellowship)). +//! - Members' salaries (via [AmbassadorSalary](pallet_salary), requiring a member to be +//! imported or inducted into [AmbassadorCore](pallet_core_fellowship)). 
+ +pub mod origins; +mod tracks; + +use super::*; +use crate::xcm_config::{FellowshipAdminBodyId, WndAssetHub}; +use frame_support::traits::{EitherOf, MapSuccess, TryMapSuccess}; +pub use origins::pallet_origins as pallet_ambassador_origins; +use origins::pallet_origins::{ + EnsureAmbassadorsVoice, EnsureAmbassadorsVoiceFrom, EnsureHeadAmbassadorsVoice, Origin, +}; +use parachains_common::polkadot::account; +use sp_core::ConstU128; +use sp_runtime::traits::{CheckedReduceBy, ConstU16, ConvertToValue, Replace}; +use xcm::prelude::*; +use xcm_builder::{AliasesIntoAccountId32, PayOverXcm}; + +/// The Ambassador Program's member ranks. +pub mod ranks { + use pallet_ranked_collective::Rank; + + #[allow(dead_code)] + pub const CANDIDATE: Rank = 0; + pub const AMBASSADOR_TIER_1: Rank = 1; + pub const AMBASSADOR_TIER_2: Rank = 2; + pub const SENIOR_AMBASSADOR_TIER_3: Rank = 3; + pub const SENIOR_AMBASSADOR_TIER_4: Rank = 4; + pub const HEAD_AMBASSADOR_TIER_5: Rank = 5; + pub const HEAD_AMBASSADOR_TIER_6: Rank = 6; + pub const HEAD_AMBASSADOR_TIER_7: Rank = 7; + pub const MASTER_AMBASSADOR_TIER_8: Rank = 8; + pub const MASTER_AMBASSADOR_TIER_9: Rank = 9; +} + +impl pallet_ambassador_origins::Config for Runtime {} + +pub type AmbassadorCollectiveInstance = pallet_ranked_collective::Instance2; + +/// Demotion is by any of: +/// - Root can demote arbitrarily. +/// - the FellowshipAdmin origin (i.e. token holder referendum); +/// - a senior members vote by the rank two above the current rank. +pub type DemoteOrigin = EitherOf< + frame_system::EnsureRootWithSuccess>, + EitherOf< + MapSuccess< + EnsureXcm>, + Replace>, + >, + TryMapSuccess< + EnsureAmbassadorsVoiceFrom>, + CheckedReduceBy>, + >, + >, +>; + +/// Promotion and approval (rank-retention) is by any of: +/// - Root can promote arbitrarily. +/// - the FellowshipAdmin origin (i.e. token holder referendum); +/// - a senior members vote by the rank two above the new/current rank. +/// - a member of rank `5` or above can add a candidate (rank `0`). +pub type PromoteOrigin = EitherOf< + DemoteOrigin, + TryMapSuccess< + pallet_ranked_collective::EnsureMember< + Runtime, + AmbassadorCollectiveInstance, + { ranks::HEAD_AMBASSADOR_TIER_5 }, + >, + Replace>, + >, +>; + +impl pallet_ranked_collective::Config for Runtime { + type WeightInfo = weights::pallet_ranked_collective_ambassador_collective::WeightInfo; + type RuntimeEvent = RuntimeEvent; + type PromoteOrigin = PromoteOrigin; + type DemoteOrigin = DemoteOrigin; + type Polls = AmbassadorReferenda; + type MinRankOfClass = sp_runtime::traits::Identity; + type VoteWeight = pallet_ranked_collective::Linear; +} + +parameter_types! { + pub const AlarmInterval: BlockNumber = 1; + pub const SubmissionDeposit: Balance = 0; + pub const UndecidingTimeout: BlockNumber = 7 * DAYS; + // The Ambassador Referenda pallet account, used as a temporary place to deposit a slashed + // imbalance before teleport to the treasury. + pub AmbassadorPalletAccount: AccountId = account::AMBASSADOR_REFERENDA_PALLET_ID.into_account_truncating(); +} + +pub type AmbassadorReferendaInstance = pallet_referenda::Instance2; + +impl pallet_referenda::Config for Runtime { + type WeightInfo = weights::pallet_referenda_ambassador_referenda::WeightInfo; + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type Scheduler = Scheduler; + type Currency = Balances; + // A proposal can be submitted by a member of the Ambassador Program of + // [ranks::SENIOR_AMBASSADOR_TIER_3] rank or higher. 
+ type SubmitOrigin = pallet_ranked_collective::EnsureMember< + Runtime, + AmbassadorCollectiveInstance, + { ranks::SENIOR_AMBASSADOR_TIER_3 }, + >; + type CancelOrigin = EitherOf, EnsureHeadAmbassadorsVoice>; + type KillOrigin = EitherOf, EnsureHeadAmbassadorsVoice>; + type Slash = ToParentTreasury; + type Votes = pallet_ranked_collective::Votes; + type Tally = pallet_ranked_collective::TallyOf; + type SubmissionDeposit = SubmissionDeposit; + type MaxQueued = ConstU32<20>; + type UndecidingTimeout = UndecidingTimeout; + type AlarmInterval = AlarmInterval; + type Tracks = tracks::TracksInfo; + type Preimages = Preimage; +} + +parameter_types! { + pub const AnnouncementLifetime: BlockNumber = 180 * DAYS; + pub const MaxAnnouncements: u32 = 50; +} + +pub type AmbassadorContentInstance = pallet_collective_content::Instance1; + +impl pallet_collective_content::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type CharterOrigin = EitherOf, EnsureHeadAmbassadorsVoice>; + type AnnouncementLifetime = AnnouncementLifetime; + // An announcement can be submitted by a Senior Ambassador member or an ambassador plurality + // voice taken via referendum. + type AnnouncementOrigin = EitherOfDiverse< + pallet_ranked_collective::EnsureMember< + Runtime, + AmbassadorCollectiveInstance, + { ranks::SENIOR_AMBASSADOR_TIER_3 }, + >, + EnsureAmbassadorsVoice, + >; + type MaxAnnouncements = MaxAnnouncements; + type WeightInfo = weights::pallet_collective_content::WeightInfo; +} + +pub type AmbassadorCoreInstance = pallet_core_fellowship::Instance2; + +impl pallet_core_fellowship::Config for Runtime { + type WeightInfo = weights::pallet_core_fellowship_ambassador_core::WeightInfo; + type RuntimeEvent = RuntimeEvent; + type Members = pallet_ranked_collective::Pallet; + type Balance = Balance; + // Parameters are set by any of: + // - Root; + // - the FellowshipAdmin origin (i.e. token holder referendum); + // - a vote among all Head Ambassadors. + type ParamsOrigin = EitherOfDiverse< + EnsureRoot, + EitherOfDiverse< + EnsureXcm>, + EnsureHeadAmbassadorsVoice, + >, + >; + // Induction (creating a candidate) is by any of: + // - Root; + // - the FellowshipAdmin origin (i.e. token holder referendum); + // - a single Head Ambassador; + // - a vote among all senior members. + type InductOrigin = EitherOfDiverse< + EnsureRoot, + EitherOfDiverse< + EnsureXcm>, + EitherOfDiverse< + pallet_ranked_collective::EnsureMember< + Runtime, + AmbassadorCollectiveInstance, + { ranks::HEAD_AMBASSADOR_TIER_5 }, + >, + EnsureAmbassadorsVoiceFrom>, + >, + >, + >; + type ApproveOrigin = PromoteOrigin; + type PromoteOrigin = PromoteOrigin; + type EvidenceSize = ConstU32<65536>; +} + +pub type AmbassadorSalaryInstance = pallet_salary::Instance2; + +parameter_types! { + // The interior location on AssetHub for the paying account. This is the Ambassador Salary + // pallet instance (which sits at index 74). This sovereign account will need funding. + pub AmbassadorSalaryLocation: InteriorMultiLocation = PalletInstance(74).into(); +} + +/// [`PayOverXcm`] setup to pay the Ambassador salary on the AssetHub in WND. 
+pub type AmbassadorSalaryPaymaster = PayOverXcm< + AmbassadorSalaryLocation, + crate::xcm_config::XcmRouter, + crate::PolkadotXcm, + ConstU32<{ 6 * HOURS }>, + AccountId, + (), + ConvertToValue, + AliasesIntoAccountId32<(), AccountId>, +>; + +impl pallet_salary::Config for Runtime { + type WeightInfo = weights::pallet_salary_ambassador_salary::WeightInfo; + type RuntimeEvent = RuntimeEvent; + + #[cfg(not(feature = "runtime-benchmarks"))] + type Paymaster = AmbassadorSalaryPaymaster; + #[cfg(feature = "runtime-benchmarks")] + type Paymaster = crate::impls::benchmarks::PayWithEnsure< + AmbassadorSalaryPaymaster, + crate::impls::benchmarks::OpenHrmpChannel>, + >; + type Members = pallet_ranked_collective::Pallet; + + #[cfg(not(feature = "runtime-benchmarks"))] + type Salary = pallet_core_fellowship::Pallet; + #[cfg(feature = "runtime-benchmarks")] + type Salary = frame_support::traits::tokens::ConvertRank< + crate::impls::benchmarks::RankToSalary, + >; + // 15 days to register for a salary payment. + type RegistrationPeriod = ConstU32<{ 15 * DAYS }>; + // 15 days to claim the salary payment. + type PayoutPeriod = ConstU32<{ 15 * DAYS }>; + // Total monthly salary budget. + type Budget = ConstU128<{ 10_000 * DOLLARS }>; +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/origins.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/origins.rs new file mode 100644 index 00000000000..3ce8a6b9e5d --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/origins.rs @@ -0,0 +1,135 @@ +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The Ambassador Program's origins. + +#[frame_support::pallet] +pub mod pallet_origins { + use crate::ambassador::ranks; + use frame_support::pallet_prelude::*; + use pallet_ranked_collective::Rank; + + #[pallet::pallet] + pub struct Pallet(PhantomData); + + /// The pallet configuration trait. + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[derive(PartialEq, Eq, Clone, MaxEncodedLen, Encode, Decode, TypeInfo, RuntimeDebug)] + #[pallet::origin] + pub enum Origin { + /// Plurality voice of the [ranks::AMBASSADOR_TIER_1] members or above given via + /// referendum. + Ambassadors, + /// Plurality voice of the [ranks::AMBASSADOR_TIER_2] members or above given via + /// referendum. + AmbassadorsTier2, + /// Plurality voice of the [ranks::SENIOR_AMBASSADOR_TIER_3] members or above given via + /// referendum. + SeniorAmbassadors, + /// Plurality voice of the [ranks::SENIOR_AMBASSADOR_TIER_4] members or above given via + /// referendum. + SeniorAmbassadorsTier4, + /// Plurality voice of the [ranks::HEAD_AMBASSADOR_TIER_5] members or above given via + /// referendum. + HeadAmbassadors, + /// Plurality voice of the [ranks::HEAD_AMBASSADOR_TIER_6] members or above given via + /// referendum. 
+ HeadAmbassadorsTier6, + /// Plurality voice of the [ranks::HEAD_AMBASSADOR_TIER_7] members or above given via + /// referendum. + HeadAmbassadorsTier7, + /// Plurality voice of the [ranks::MASTER_AMBASSADOR_TIER_8] members or above given via + /// referendum. + MasterAmbassadors, + /// Plurality voice of the [ranks::MASTER_AMBASSADOR_TIER_9] members or above given via + /// referendum. + MasterAmbassadorsTier9, + } + + impl Origin { + /// Returns the rank that the origin `self` speaks for, or `None` if it doesn't speak for + /// any. + pub fn as_voice(&self) -> Option { + Some(match &self { + Origin::Ambassadors => ranks::AMBASSADOR_TIER_1, + Origin::AmbassadorsTier2 => ranks::AMBASSADOR_TIER_2, + Origin::SeniorAmbassadors => ranks::SENIOR_AMBASSADOR_TIER_3, + Origin::SeniorAmbassadorsTier4 => ranks::SENIOR_AMBASSADOR_TIER_4, + Origin::HeadAmbassadors => ranks::HEAD_AMBASSADOR_TIER_5, + Origin::HeadAmbassadorsTier6 => ranks::HEAD_AMBASSADOR_TIER_6, + Origin::HeadAmbassadorsTier7 => ranks::HEAD_AMBASSADOR_TIER_7, + Origin::MasterAmbassadors => ranks::MASTER_AMBASSADOR_TIER_8, + Origin::MasterAmbassadorsTier9 => ranks::MASTER_AMBASSADOR_TIER_9, + }) + } + } + + /// Implementation of the [EnsureOrigin] trait for the [Origin::HeadAmbassadors] origin. + pub struct EnsureHeadAmbassadorsVoice; + impl> + From> EnsureOrigin for EnsureHeadAmbassadorsVoice { + type Success = (); + fn try_origin(o: O) -> Result { + o.into().and_then(|o| match o { + Origin::HeadAmbassadors => Ok(()), + r => Err(O::from(r)), + }) + } + + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result { + Ok(O::from(Origin::HeadAmbassadors)) + } + } + + /// Implementation of the [EnsureOrigin] trait for the plurality voice [Origin]s + /// from a given rank `R` with the success result of the corresponding [Rank]. + pub struct EnsureAmbassadorsVoiceFrom(PhantomData); + impl, O: Into> + From> EnsureOrigin + for EnsureAmbassadorsVoiceFrom + { + type Success = Rank; + fn try_origin(o: O) -> Result { + o.into().and_then(|o| match Origin::as_voice(&o) { + Some(r) if r >= R::get() => Ok(r), + _ => Err(O::from(o)), + }) + } + + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result { + ranks::MASTER_AMBASSADOR_TIER_9 + .ge(&R::get()) + .then(|| O::from(Origin::MasterAmbassadorsTier9)) + .ok_or(()) + } + } + + /// Implementation of the [EnsureOrigin] trait for the plurality voice [Origin]s with the + /// success result of the corresponding [Rank]. + pub struct EnsureAmbassadorsVoice; + impl> + From> EnsureOrigin for EnsureAmbassadorsVoice { + type Success = Rank; + fn try_origin(o: O) -> Result { + o.into().and_then(|o| Origin::as_voice(&o).ok_or(O::from(o))) + } + + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result { + Ok(O::from(Origin::MasterAmbassadorsTier9)) + } + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/tracks.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/tracks.rs new file mode 100644 index 00000000000..d4a2d3bbf1c --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/tracks.rs @@ -0,0 +1,282 @@ +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The Ambassador Program's referenda voting tracks. + +use super::Origin; +use crate::{Balance, BlockNumber, RuntimeOrigin, DAYS, DOLLARS, HOURS}; +use sp_runtime::Perbill; + +/// Referendum `TrackId` type. +pub type TrackId = u16; + +/// Referendum track IDs. +pub mod constants { + use super::TrackId; + + pub const AMBASSADOR_TIER_1: TrackId = 1; + pub const AMBASSADOR_TIER_2: TrackId = 2; + pub const SENIOR_AMBASSADOR_TIER_3: TrackId = 3; + pub const SENIOR_AMBASSADOR_TIER_4: TrackId = 4; + pub const HEAD_AMBASSADOR_TIER_5: TrackId = 5; + pub const HEAD_AMBASSADOR_TIER_6: TrackId = 6; + pub const HEAD_AMBASSADOR_TIER_7: TrackId = 7; + pub const MASTER_AMBASSADOR_TIER_8: TrackId = 8; + pub const MASTER_AMBASSADOR_TIER_9: TrackId = 9; +} + +/// The type implementing the [`pallet_referenda::TracksInfo`] trait for referenda pallet. +pub struct TracksInfo; + +/// Information on the voting tracks. +impl pallet_referenda::TracksInfo for TracksInfo { + type Id = TrackId; + + type RuntimeOrigin = ::PalletsOrigin; + + /// Return the array of available tracks and their information. + fn tracks() -> &'static [(Self::Id, pallet_referenda::TrackInfo)] { + static DATA: [(TrackId, pallet_referenda::TrackInfo); 9] = [ + ( + constants::AMBASSADOR_TIER_1, + pallet_referenda::TrackInfo { + name: "ambassador tier 1", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 24 * HOURS, + decision_period: 1 * DAYS, + confirm_period: 24 * HOURS, + min_enactment_period: 1 * HOURS, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(10), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + constants::AMBASSADOR_TIER_2, + pallet_referenda::TrackInfo { + name: "ambassador tier 2", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 24 * HOURS, + decision_period: 1 * DAYS, + confirm_period: 24 * HOURS, + min_enactment_period: 1 * HOURS, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(10), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + constants::SENIOR_AMBASSADOR_TIER_3, + pallet_referenda::TrackInfo { + name: "senior ambassador tier 3", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 24 * HOURS, + decision_period: 1 * DAYS, + confirm_period: 24 * HOURS, + min_enactment_period: 1 * HOURS, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(10), + ceil: Perbill::from_percent(50), + }, + 
}, + ), + ( + constants::SENIOR_AMBASSADOR_TIER_4, + pallet_referenda::TrackInfo { + name: "senior ambassador tier 4", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 24 * HOURS, + decision_period: 1 * DAYS, + confirm_period: 24 * HOURS, + min_enactment_period: 1 * HOURS, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(10), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + constants::HEAD_AMBASSADOR_TIER_5, + pallet_referenda::TrackInfo { + name: "head ambassador tier 5", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 24 * HOURS, + decision_period: 1 * DAYS, + confirm_period: 24 * HOURS, + min_enactment_period: 1 * HOURS, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(10), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + constants::HEAD_AMBASSADOR_TIER_6, + pallet_referenda::TrackInfo { + name: "head ambassador tier 6", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 24 * HOURS, + decision_period: 1 * DAYS, + confirm_period: 24 * HOURS, + min_enactment_period: 1 * HOURS, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(10), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + constants::HEAD_AMBASSADOR_TIER_7, + pallet_referenda::TrackInfo { + name: "head ambassador tier 7", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 24 * HOURS, + decision_period: 1 * DAYS, + confirm_period: 24 * HOURS, + min_enactment_period: 1 * HOURS, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(10), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + constants::MASTER_AMBASSADOR_TIER_8, + pallet_referenda::TrackInfo { + name: "master ambassador tier 8", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 24 * HOURS, + decision_period: 1 * DAYS, + confirm_period: 24 * HOURS, + min_enactment_period: 1 * HOURS, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(10), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + constants::MASTER_AMBASSADOR_TIER_9, + pallet_referenda::TrackInfo { + name: "master ambassador tier 9", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 24 * HOURS, + decision_period: 1 * DAYS, + confirm_period: 24 * HOURS, + min_enactment_period: 1 * HOURS, + min_approval: 
pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(10), + ceil: Perbill::from_percent(50), + }, + }, + ), + ]; + &DATA[..] + } + + /// Determine the voting track for the given `origin`. + fn track_for(id: &Self::RuntimeOrigin) -> Result { + #[cfg(feature = "runtime-benchmarks")] + { + // For benchmarks, we enable a root origin. + // It is important that this is not available in production! + let root: Self::RuntimeOrigin = frame_system::RawOrigin::Root.into(); + if &root == id { + return Ok(constants::MASTER_AMBASSADOR_TIER_9) + } + } + + match Origin::try_from(id.clone()) { + Ok(Origin::Ambassadors) => Ok(constants::AMBASSADOR_TIER_1), + Ok(Origin::AmbassadorsTier2) => Ok(constants::AMBASSADOR_TIER_2), + Ok(Origin::SeniorAmbassadors) => Ok(constants::SENIOR_AMBASSADOR_TIER_3), + Ok(Origin::SeniorAmbassadorsTier4) => Ok(constants::SENIOR_AMBASSADOR_TIER_4), + Ok(Origin::HeadAmbassadors) => Ok(constants::HEAD_AMBASSADOR_TIER_5), + Ok(Origin::HeadAmbassadorsTier6) => Ok(constants::HEAD_AMBASSADOR_TIER_6), + Ok(Origin::HeadAmbassadorsTier7) => Ok(constants::HEAD_AMBASSADOR_TIER_7), + Ok(Origin::MasterAmbassadors) => Ok(constants::MASTER_AMBASSADOR_TIER_8), + Ok(Origin::MasterAmbassadorsTier9) => Ok(constants::MASTER_AMBASSADOR_TIER_9), + _ => Err(()), + } + } +} + +// implements [`frame_support::traits::Get`] for [`TracksInfo`] +pallet_referenda::impl_tracksinfo_get!(TracksInfo, Balance, BlockNumber); diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs new file mode 100644 index 00000000000..b7412705dde --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs @@ -0,0 +1,238 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! The Westend Technical Fellowship. 
+ +mod origins; +mod tracks; +use crate::{ + impls::ToParentTreasury, + weights, + xcm_config::{FellowshipAdminBodyId, UsdtAssetHub}, + AccountId, Balance, Balances, FellowshipReferenda, GovernanceLocation, Preimage, Runtime, + RuntimeCall, RuntimeEvent, RuntimeOrigin, Scheduler, WestendTreasuryAccount, DAYS, +}; +use frame_support::{ + parameter_types, + traits::{EitherOf, EitherOfDiverse, MapSuccess, OriginTrait, TryWithMorphedArg}, +}; +use frame_system::EnsureRootWithSuccess; +pub use origins::{ + pallet_origins as pallet_fellowship_origins, Architects, EnsureCanPromoteTo, EnsureCanRetainAt, + EnsureFellowship, Fellows, Masters, Members, ToVoice, +}; +use pallet_ranked_collective::EnsureOfRank; +use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; +use parachains_common::{polkadot::account, HOURS}; +use sp_core::{ConstU128, ConstU32}; +use sp_runtime::traits::{AccountIdConversion, ConstU16, ConvertToValue, Replace, TakeFirst}; +use xcm_builder::{AliasesIntoAccountId32, PayOverXcm}; + +#[cfg(feature = "runtime-benchmarks")] +use crate::impls::benchmarks::{OpenHrmpChannel, PayWithEnsure}; + +/// The Fellowship members' ranks. +pub mod ranks { + use pallet_ranked_collective::Rank; + + pub const DAN_1: Rank = 1; // aka Members. + pub const DAN_2: Rank = 2; + pub const DAN_3: Rank = 3; // aka Fellows. + pub const DAN_4: Rank = 4; // aka Architects. + pub const DAN_5: Rank = 5; + pub const DAN_6: Rank = 6; + pub const DAN_7: Rank = 7; // aka Masters. + pub const DAN_8: Rank = 8; + pub const DAN_9: Rank = 9; +} + +parameter_types! { + // Referenda pallet account, used to temporarily deposit slashed imbalance before teleporting. + pub ReferendaPalletAccount: AccountId = account::REFERENDA_PALLET_ID.into_account_truncating(); +} + +impl pallet_fellowship_origins::Config for Runtime {} + +pub type FellowshipReferendaInstance = pallet_referenda::Instance1; + +impl pallet_referenda::Config for Runtime { + type WeightInfo = weights::pallet_referenda_fellowship_referenda::WeightInfo; + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type Scheduler = Scheduler; + type Currency = Balances; + // Fellows can submit proposals. + type SubmitOrigin = EitherOf< + pallet_ranked_collective::EnsureMember, + MapSuccess< + TryWithMorphedArg< + RuntimeOrigin, + ::PalletsOrigin, + ToVoice, + EnsureOfRank, + (AccountId, u16), + >, + TakeFirst, + >, + >; + type CancelOrigin = Architects; + type KillOrigin = Masters; + type Slash = ToParentTreasury; + type Votes = pallet_ranked_collective::Votes; + type Tally = pallet_ranked_collective::TallyOf; + type SubmissionDeposit = ConstU128<0>; + type MaxQueued = ConstU32<100>; + type UndecidingTimeout = ConstU32<{ 7 * DAYS }>; + type AlarmInterval = ConstU32<1>; + type Tracks = tracks::TracksInfo; + type Preimages = Preimage; +} + +pub type FellowshipCollectiveInstance = pallet_ranked_collective::Instance1; + +impl pallet_ranked_collective::Config for Runtime { + type WeightInfo = weights::pallet_ranked_collective_fellowship_collective::WeightInfo; + type RuntimeEvent = RuntimeEvent; + + #[cfg(not(feature = "runtime-benchmarks"))] + // Promotions and the induction of new members are serviced by `FellowshipCore` pallet instance. + type PromoteOrigin = frame_system::EnsureNever; + #[cfg(feature = "runtime-benchmarks")] + // The maximum value of `u16` set as a success value for the root to ensure the benchmarks will + // pass. + type PromoteOrigin = EnsureRootWithSuccess>; + + // Demotion is by any of: + // - Root can demote arbitrarily. 
+ // - the FellowshipAdmin origin (i.e. token holder referendum); + // + // The maximum value of `u16` set as a success value for the root to ensure the benchmarks will + // pass. + type DemoteOrigin = EitherOf< + EnsureRootWithSuccess>, + MapSuccess< + EnsureXcm>, + Replace>, + >, + >; + type Polls = FellowshipReferenda; + type MinRankOfClass = tracks::MinRankOfClass; + type VoteWeight = pallet_ranked_collective::Geometric; +} + +pub type FellowshipCoreInstance = pallet_core_fellowship::Instance1; + +impl pallet_core_fellowship::Config for Runtime { + type WeightInfo = weights::pallet_core_fellowship_fellowship_core::WeightInfo; + type RuntimeEvent = RuntimeEvent; + type Members = pallet_ranked_collective::Pallet; + type Balance = Balance; + // Parameters are set by any of: + // - Root; + // - the FellowshipAdmin origin (i.e. token holder referendum); + // - a vote among all Fellows. + type ParamsOrigin = EitherOfDiverse< + EnsureXcm>, + Fellows, + >; + // Induction (creating a candidate) is by any of: + // - Root; + // - the FellowshipAdmin origin (i.e. token holder referendum); + // - a single Fellow; + // - a vote among all Members. + type InductOrigin = EitherOfDiverse< + EnsureXcm>, + EitherOfDiverse< + pallet_ranked_collective::EnsureMember< + Runtime, + FellowshipCollectiveInstance, + { ranks::DAN_3 }, + >, + Members, + >, + >; + // Approval (rank-retention) of a Member's current rank is by any of: + // - Root; + // - the FellowshipAdmin origin (i.e. token holder referendum); + // - a vote by the rank two above the current rank for all retention up to the Master rank. + type ApproveOrigin = EitherOf< + MapSuccess< + EnsureXcm>, + Replace>, + >, + EnsureCanRetainAt, + >; + // Promotion is by any of: + // - Root can promote arbitrarily. + // - the FellowshipAdmin origin (i.e. token holder referendum); + // - a vote by the rank two above the new rank for all promotions up to the Master rank. + type PromoteOrigin = EitherOf< + MapSuccess< + EnsureXcm>, + Replace>, + >, + EnsureCanPromoteTo, + >; + type EvidenceSize = ConstU32<65536>; +} + +pub type FellowshipSalaryInstance = pallet_salary::Instance1; + +use xcm::prelude::*; + +parameter_types! { + // The interior location on AssetHub for the paying account. This is the Fellowship Salary + // pallet instance (which sits at index 64). This sovereign account will need funding. + pub Interior: InteriorMultiLocation = PalletInstance(64).into(); +} + +const USDT_UNITS: u128 = 1_000_000; + +/// [`PayOverXcm`] setup to pay the Fellowship salary on the AssetHub in USDT. +pub type FellowshipSalaryPaymaster = PayOverXcm< + Interior, + crate::xcm_config::XcmRouter, + crate::PolkadotXcm, + ConstU32<{ 6 * HOURS }>, + AccountId, + (), + ConvertToValue, + AliasesIntoAccountId32<(), AccountId>, +>; + +impl pallet_salary::Config for Runtime { + type WeightInfo = weights::pallet_salary_fellowship_salary::WeightInfo; + type RuntimeEvent = RuntimeEvent; + + #[cfg(not(feature = "runtime-benchmarks"))] + type Paymaster = FellowshipSalaryPaymaster; + #[cfg(feature = "runtime-benchmarks")] + type Paymaster = PayWithEnsure>>; + type Members = pallet_ranked_collective::Pallet; + + #[cfg(not(feature = "runtime-benchmarks"))] + type Salary = pallet_core_fellowship::Pallet; + #[cfg(feature = "runtime-benchmarks")] + type Salary = frame_support::traits::tokens::ConvertRank< + crate::impls::benchmarks::RankToSalary, + >; + // 15 days to register for a salary payment. + type RegistrationPeriod = ConstU32<{ 15 * DAYS }>; + // 15 days to claim the salary payment. 
+ type PayoutPeriod = ConstU32<{ 15 * DAYS }>; + // Total monthly salary budget. + type Budget = ConstU128<{ 100_000 * USDT_UNITS }>; +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/origins.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/origins.rs new file mode 100644 index 00000000000..5ed2c19f79e --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/origins.rs @@ -0,0 +1,247 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Fellowship custom origins. + +use super::ranks; +pub use pallet_origins::*; + +#[frame_support::pallet] +pub mod pallet_origins { + use super::ranks; + use frame_support::pallet_prelude::*; + use pallet_ranked_collective::Rank; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[derive(PartialEq, Eq, Clone, MaxEncodedLen, Encode, Decode, TypeInfo, RuntimeDebug)] + #[pallet::origin] + pub enum Origin { + /// Origin aggregated through weighted votes of those with rank 1 or above; `Success` is 1. + /// Aka the "voice" of all Members. + Members, + /// Origin aggregated through weighted votes of those with rank 2 or above; `Success` is 2. + /// Aka the "voice" of members at least II Dan. + Fellowship2Dan, + /// Origin aggregated through weighted votes of those with rank 3 or above; `Success` is 3. + /// Aka the "voice" of all Fellows. + Fellows, + /// Origin aggregated through weighted votes of those with rank 4 or above; `Success` is 4. + /// Aka the "voice" of members at least IV Dan. + Architects, + /// Origin aggregated through weighted votes of those with rank 5 or above; `Success` is 5. + /// Aka the "voice" of members at least V Dan. + Fellowship5Dan, + /// Origin aggregated through weighted votes of those with rank 6 or above; `Success` is 6. + /// Aka the "voice" of members at least VI Dan. + Fellowship6Dan, + /// Origin aggregated through weighted votes of those with rank 7 or above; `Success` is 7. + /// Aka the "voice" of all Masters. + Masters, + /// Origin aggregated through weighted votes of those with rank 8 or above; `Success` is 8. + /// Aka the "voice" of members at least VIII Dan. + Fellowship8Dan, + /// Origin aggregated through weighted votes of those with rank 9 or above; `Success` is 9. + /// Aka the "voice" of members at least IX Dan. + Fellowship9Dan, + + /// Origin aggregated through weighted votes of those with rank 3 or above when voting on + /// a fortnight-long track; `Success` is 1. + RetainAt1Dan, + /// Origin aggregated through weighted votes of those with rank 4 or above when voting on + /// a fortnight-long track; `Success` is 2. + RetainAt2Dan, + /// Origin aggregated through weighted votes of those with rank 5 or above when voting on + /// a fortnight-long track; `Success` is 3. 
+ RetainAt3Dan, + /// Origin aggregated through weighted votes of those with rank 6 or above when voting on + /// a fortnight-long track; `Success` is 4. + RetainAt4Dan, + /// Origin aggregated through weighted votes of those with rank 7 or above when voting on + /// a fortnight-long track; `Success` is 5. + RetainAt5Dan, + /// Origin aggregated through weighted votes of those with rank 8 or above when voting on + /// a fortnight-long track; `Success` is 6. + RetainAt6Dan, + + /// Origin aggregated through weighted votes of those with rank 3 or above when voting on + /// a month-long track; `Success` is 1. + PromoteTo1Dan, + /// Origin aggregated through weighted votes of those with rank 4 or above when voting on + /// a month-long track; `Success` is 2. + PromoteTo2Dan, + /// Origin aggregated through weighted votes of those with rank 5 or above when voting on + /// a month-long track; `Success` is 3. + PromoteTo3Dan, + /// Origin aggregated through weighted votes of those with rank 6 or above when voting on + /// a month-long track; `Success` is 4. + PromoteTo4Dan, + /// Origin aggregated through weighted votes of those with rank 7 or above when voting on + /// a month-long track; `Success` is 5. + PromoteTo5Dan, + /// Origin aggregated through weighted votes of those with rank 8 or above when voting on + /// a month-long track; `Success` is 6. + PromoteTo6Dan, + } + + impl Origin { + /// Returns the rank that the origin `self` speaks for, or `None` if it doesn't speak for + /// any. + /// + /// `Some` will be returned only for the first 9 elements of [Origin]. + pub fn as_voice(&self) -> Option { + Some(match &self { + Origin::Members => ranks::DAN_1, + Origin::Fellowship2Dan => ranks::DAN_2, + Origin::Fellows => ranks::DAN_3, + Origin::Architects => ranks::DAN_4, + Origin::Fellowship5Dan => ranks::DAN_5, + Origin::Fellowship6Dan => ranks::DAN_6, + Origin::Masters => ranks::DAN_7, + Origin::Fellowship8Dan => ranks::DAN_8, + Origin::Fellowship9Dan => ranks::DAN_9, + _ => return None, + }) + } + } + + /// A `TryMorph` implementation which is designed to convert an aggregate `RuntimeOrigin` + /// value into the Fellowship voice it represents if it is a Fellowship pallet origin an + /// appropriate variant. See also [Origin::as_voice]. + pub struct ToVoice; + impl<'a, O: 'a + TryInto<&'a Origin>> sp_runtime::traits::TryMorph for ToVoice { + type Outcome = pallet_ranked_collective::Rank; + fn try_morph(o: O) -> Result { + o.try_into().ok().and_then(Origin::as_voice).ok_or(()) + } + } + + macro_rules! decl_unit_ensures { + ( $name:ident: $success_type:ty = $success:expr ) => { + pub struct $name; + impl> + From> + EnsureOrigin for $name + { + type Success = $success_type; + fn try_origin(o: O) -> Result { + o.into().and_then(|o| match o { + Origin::$name => Ok($success), + r => Err(O::from(r)), + }) + } + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result { + Ok(O::from(Origin::$name)) + } + } + }; + ( $name:ident ) => { decl_unit_ensures! { $name : () = () } }; + ( $name:ident: $success_type:ty = $success:expr, $( $rest:tt )* ) => { + decl_unit_ensures! { $name: $success_type = $success } + decl_unit_ensures! { $( $rest )* } + }; + ( $name:ident, $( $rest:tt )* ) => { + decl_unit_ensures! { $name } + decl_unit_ensures! { $( $rest )* } + }; + () => {} + } + decl_unit_ensures!( + Members: Rank = ranks::DAN_1, + Fellows: Rank = ranks::DAN_3, + Architects: Rank = ranks::DAN_4, + Masters: Rank = ranks::DAN_7, + ); + + macro_rules! 
decl_ensure { + ( + $vis:vis type $name:ident: EnsureOrigin { + $( $item:ident = $success:expr, )* + } + ) => { + $vis struct $name; + impl> + From> + EnsureOrigin for $name + { + type Success = $success_type; + fn try_origin(o: O) -> Result { + o.into().and_then(|o| match o { + $( + Origin::$item => Ok($success), + )* + r => Err(O::from(r)), + }) + } + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result { + // By convention the more privileged origins go later, so for greatest chance + // of success, we want the last one. + let _result: Result = Err(()); + $( + let _result: Result = Ok(O::from(Origin::$item)); + )* + _result + } + } + } + } + + // Fellowship origin indicating weighted voting from at least the rank of `Success` on a + // week-long track. + decl_ensure! { + pub type EnsureFellowship: EnsureOrigin { + Members = ranks::DAN_1, + Fellowship2Dan = ranks::DAN_2, + Fellows = ranks::DAN_3, + Architects = ranks::DAN_4, + Fellowship5Dan = ranks::DAN_5, + Fellowship6Dan = ranks::DAN_6, + Masters = ranks::DAN_7, + Fellowship8Dan = ranks::DAN_8, + Fellowship9Dan = ranks::DAN_9, + } + } + + // Fellowship origin indicating weighted voting from at least the rank of `Success + 2` on + // a fortnight-long track; needed for Fellowship retention voting. + decl_ensure! { + pub type EnsureCanRetainAt: EnsureOrigin { + RetainAt1Dan = ranks::DAN_1, + RetainAt2Dan = ranks::DAN_2, + RetainAt3Dan = ranks::DAN_3, + RetainAt4Dan = ranks::DAN_4, + RetainAt5Dan = ranks::DAN_5, + RetainAt6Dan = ranks::DAN_6, + } + } + + // Fellowship origin indicating weighted voting from at least the rank of `Success + 2` on + // a month-long track; needed for Fellowship promotion voting. + decl_ensure! { + pub type EnsureCanPromoteTo: EnsureOrigin { + PromoteTo1Dan = ranks::DAN_1, + PromoteTo2Dan = ranks::DAN_2, + PromoteTo3Dan = ranks::DAN_3, + PromoteTo4Dan = ranks::DAN_4, + PromoteTo5Dan = ranks::DAN_5, + PromoteTo6Dan = ranks::DAN_6, + } + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/tracks.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/tracks.rs new file mode 100644 index 00000000000..099bdf4cf75 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/tracks.rs @@ -0,0 +1,532 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Track configurations for Fellowship. + +use crate::{Balance, BlockNumber, RuntimeOrigin, DAYS, DOLLARS, HOURS, MINUTES}; +use pallet_ranked_collective::Rank; +use sp_runtime::{traits::Convert, Perbill}; + +/// Referendum `TrackId` type. +pub type TrackId = u16; + +/// Referendum track IDs. +pub mod constants { + use super::TrackId; + + // Regular tracks (7 days) used for general operations. The required rank for voting is the + // same as that which is named (and also the track ID). 
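
The track constants below follow a fixed arithmetic scheme that the `MinRankOfClass` converter further down implements: regular tracks use the voting rank itself as the track ID, retention tracks are offset by 10 from the Dan being retained (so the required rank is the ID minus 8), and promotion tracks are offset by 20 (required rank is the ID minus 18). A minimal, self-contained sketch of that mapping, using a hypothetical helper rather than the pallet types:

```rust
/// Illustrative sketch of the track-ID scheme (hypothetical helper, not pallet code).
fn min_rank_for_track(id: u16) -> Option<u16> {
    match id {
        // Regular tracks: the ID doubles as the minimum voting rank.
        1..=9 => Some(id),
        // Retention tracks: retain at Dan `id - 10`, voted on by rank `(id - 10) + 2`.
        11..=16 => Some(id - 8),
        // Promotion tracks: promote to Dan `id - 20`, voted on by rank `(id - 20) + 2`.
        21..=26 => Some(id - 18),
        _ => None,
    }
}

fn main() {
    assert_eq!(min_rank_for_track(3), Some(3));  // "fellows" track: III Dan and above vote
    assert_eq!(min_rank_for_track(11), Some(3)); // retain at I Dan needs rank 1 + 2
    assert_eq!(min_rank_for_track(26), Some(8)); // promote to VI Dan needs rank 6 + 2
    assert_eq!(min_rank_for_track(10), None);    // unused IDs map to no rank in this sketch
}
```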
+ pub const MEMBERS: TrackId = 1; + pub const PROFICIENTS: TrackId = 2; + pub const FELLOWS: TrackId = 3; + pub const ARCHITECTS: TrackId = 4; + pub const ARCHITECTS_ADEPT: TrackId = 5; + pub const GRAND_ARCHITECTS: TrackId = 6; + pub const MASTERS: TrackId = 7; + pub const MASTERS_CONSTANT: TrackId = 8; + pub const GRAND_MASTERS: TrackId = 9; + + // Longer tracks (14 days) used for rank retention. These require a rank of two more than the + // grade at which they retain (as per the whitepaper). This works out as the track ID minus 8. + pub const RETAIN_AT_1DAN: TrackId = 11; + pub const RETAIN_AT_2DAN: TrackId = 12; + pub const RETAIN_AT_3DAN: TrackId = 13; + pub const RETAIN_AT_4DAN: TrackId = 14; + pub const RETAIN_AT_5DAN: TrackId = 15; + pub const RETAIN_AT_6DAN: TrackId = 16; + + // Longest tracks (30 days) used for promotions. These require a rank of two more than the + // grade to which they promote (as per the whitepaper). This works out as the track ID minus 18. + pub const PROMOTE_TO_1DAN: TrackId = 21; + pub const PROMOTE_TO_2DAN: TrackId = 22; + pub const PROMOTE_TO_3DAN: TrackId = 23; + pub const PROMOTE_TO_4DAN: TrackId = 24; + pub const PROMOTE_TO_5DAN: TrackId = 25; + pub const PROMOTE_TO_6DAN: TrackId = 26; +} + +/// Convert the track ID (defined above) into the minimum rank (i.e. fellowship Dan grade) required +/// to vote on the track. +pub struct MinRankOfClass; +impl Convert<TrackId, Rank> for MinRankOfClass { + fn convert(a: TrackId) -> Rank { + match a { + // Just a regular vote: the track ID is conveniently the same as the minimum rank. + regular @ 1..=9 => regular, + // A retention vote; the track ID turns out to be 8 more than the minimum required rank. + retention @ 11..=16 => retention - 8, + // A promotion vote; the track ID turns out to be 18 more than the minimum required + // rank. 
+ promotion @ 21..=26 => promotion - 18, + _ => Rank::max_value(), + } + } +} + +const RETAIN_MAX_DECIDING: u32 = 25; +const RETAIN_DECISION_DEPOSIT: Balance = 5 * DOLLARS; +const RETAIN_PREPARE_PERIOD: BlockNumber = 0; +const RETAIN_DECISION_PERIOD: BlockNumber = 14 * DAYS; +const RETAIN_CONFIRM_PERIOD: BlockNumber = 1 * HOURS; +const RETAIN_MIN_ENACTMENT_PERIOD: BlockNumber = 0; +const RETAIN_MIN_APPROVAL: pallet_referenda::Curve = pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(60), + ceil: Perbill::from_percent(100), +}; +const RETAIN_MIN_SUPPORT: pallet_referenda::Curve = pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(10), + ceil: Perbill::from_percent(100), +}; + +const PROMOTE_MAX_DECIDING: u32 = 10; +const PROMOTE_DECISION_DEPOSIT: Balance = 5 * DOLLARS; +const PROMOTE_PREPARE_PERIOD: BlockNumber = 0; +const PROMOTE_DECISION_PERIOD: BlockNumber = 30 * DAYS; +const PROMOTE_CONFIRM_PERIOD: BlockNumber = 1 * HOURS; +const PROMOTE_MIN_ENACTMENT_PERIOD: BlockNumber = 0; +const PROMOTE_MIN_APPROVAL: pallet_referenda::Curve = pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(60), + ceil: Perbill::from_percent(100), +}; +const PROMOTE_MIN_SUPPORT: pallet_referenda::Curve = pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(10), + ceil: Perbill::from_percent(100), +}; + +pub struct TracksInfo; +impl pallet_referenda::TracksInfo for TracksInfo { + type Id = TrackId; + type RuntimeOrigin = ::PalletsOrigin; + fn tracks() -> &'static [(Self::Id, pallet_referenda::TrackInfo)] { + use constants as tracks; + static DATA: [(TrackId, pallet_referenda::TrackInfo); 21] = [ + ( + tracks::MEMBERS, + pallet_referenda::TrackInfo { + name: "members", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 30 * MINUTES, + decision_period: 1 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(100), + }, + }, + ), + ( + tracks::PROFICIENTS, + pallet_referenda::TrackInfo { + name: "proficient members", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 30 * MINUTES, + decision_period: 1 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(100), + }, + }, + ), + ( + tracks::FELLOWS, + pallet_referenda::TrackInfo { + name: "fellows", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 30 * MINUTES, + decision_period: 1 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: 
pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(100), + }, + }, + ), + ( + tracks::ARCHITECTS, + pallet_referenda::TrackInfo { + name: "architects", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 30 * MINUTES, + decision_period: 1 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(100), + }, + }, + ), + ( + tracks::ARCHITECTS_ADEPT, + pallet_referenda::TrackInfo { + name: "architects adept", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 30 * MINUTES, + decision_period: 1 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(100), + }, + }, + ), + ( + tracks::GRAND_ARCHITECTS, + pallet_referenda::TrackInfo { + name: "grand architects", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 30 * MINUTES, + decision_period: 1 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(100), + }, + }, + ), + ( + tracks::MASTERS, + pallet_referenda::TrackInfo { + name: "masters", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 30 * MINUTES, + decision_period: 1 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(100), + }, + }, + ), + ( + tracks::MASTERS_CONSTANT, + pallet_referenda::TrackInfo { + name: "masters constant", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 30 * MINUTES, + decision_period: 1 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(100), + }, + }, + ), + ( + tracks::GRAND_MASTERS, + pallet_referenda::TrackInfo { + name: "grand masters", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 30 * MINUTES, + decision_period: 1 * DAYS, + confirm_period: 30 * MINUTES, + 
min_enactment_period: 5 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(100), + }, + }, + ), + ( + tracks::RETAIN_AT_1DAN, + pallet_referenda::TrackInfo { + name: "retain at I Dan", + max_deciding: RETAIN_MAX_DECIDING, + decision_deposit: RETAIN_DECISION_DEPOSIT, + prepare_period: RETAIN_PREPARE_PERIOD, + decision_period: RETAIN_DECISION_PERIOD, + confirm_period: RETAIN_CONFIRM_PERIOD, + min_enactment_period: RETAIN_MIN_ENACTMENT_PERIOD, + min_approval: RETAIN_MIN_APPROVAL, + min_support: RETAIN_MIN_SUPPORT, + }, + ), + ( + tracks::RETAIN_AT_2DAN, + pallet_referenda::TrackInfo { + name: "retain at II Dan", + max_deciding: RETAIN_MAX_DECIDING, + decision_deposit: RETAIN_DECISION_DEPOSIT, + prepare_period: RETAIN_PREPARE_PERIOD, + decision_period: RETAIN_DECISION_PERIOD, + confirm_period: RETAIN_CONFIRM_PERIOD, + min_enactment_period: RETAIN_MIN_ENACTMENT_PERIOD, + min_approval: RETAIN_MIN_APPROVAL, + min_support: RETAIN_MIN_SUPPORT, + }, + ), + ( + tracks::RETAIN_AT_3DAN, + pallet_referenda::TrackInfo { + name: "retain at III Dan", + max_deciding: RETAIN_MAX_DECIDING, + decision_deposit: RETAIN_DECISION_DEPOSIT, + prepare_period: RETAIN_PREPARE_PERIOD, + decision_period: RETAIN_DECISION_PERIOD, + confirm_period: RETAIN_CONFIRM_PERIOD, + min_enactment_period: RETAIN_MIN_ENACTMENT_PERIOD, + min_approval: RETAIN_MIN_APPROVAL, + min_support: RETAIN_MIN_SUPPORT, + }, + ), + ( + tracks::RETAIN_AT_4DAN, + pallet_referenda::TrackInfo { + name: "retain at IV Dan", + max_deciding: RETAIN_MAX_DECIDING, + decision_deposit: RETAIN_DECISION_DEPOSIT, + prepare_period: RETAIN_PREPARE_PERIOD, + decision_period: RETAIN_DECISION_PERIOD, + confirm_period: RETAIN_CONFIRM_PERIOD, + min_enactment_period: RETAIN_MIN_ENACTMENT_PERIOD, + min_approval: RETAIN_MIN_APPROVAL, + min_support: RETAIN_MIN_SUPPORT, + }, + ), + ( + tracks::RETAIN_AT_5DAN, + pallet_referenda::TrackInfo { + name: "retain at V Dan", + max_deciding: RETAIN_MAX_DECIDING, + decision_deposit: RETAIN_DECISION_DEPOSIT, + prepare_period: RETAIN_PREPARE_PERIOD, + decision_period: RETAIN_DECISION_PERIOD, + confirm_period: RETAIN_CONFIRM_PERIOD, + min_enactment_period: RETAIN_MIN_ENACTMENT_PERIOD, + min_approval: RETAIN_MIN_APPROVAL, + min_support: RETAIN_MIN_SUPPORT, + }, + ), + ( + tracks::RETAIN_AT_6DAN, + pallet_referenda::TrackInfo { + name: "retain at VI Dan", + max_deciding: RETAIN_MAX_DECIDING, + decision_deposit: RETAIN_DECISION_DEPOSIT, + prepare_period: RETAIN_PREPARE_PERIOD, + decision_period: RETAIN_DECISION_PERIOD, + confirm_period: RETAIN_CONFIRM_PERIOD, + min_enactment_period: RETAIN_MIN_ENACTMENT_PERIOD, + min_approval: RETAIN_MIN_APPROVAL, + min_support: RETAIN_MIN_SUPPORT, + }, + ), + ( + tracks::PROMOTE_TO_1DAN, + pallet_referenda::TrackInfo { + name: "promote to I Dan", + max_deciding: PROMOTE_MAX_DECIDING, + decision_deposit: PROMOTE_DECISION_DEPOSIT, + prepare_period: PROMOTE_PREPARE_PERIOD, + decision_period: PROMOTE_DECISION_PERIOD, + confirm_period: PROMOTE_CONFIRM_PERIOD, + min_enactment_period: PROMOTE_MIN_ENACTMENT_PERIOD, + min_approval: PROMOTE_MIN_APPROVAL, + min_support: PROMOTE_MIN_SUPPORT, + }, + ), + ( + tracks::PROMOTE_TO_2DAN, + pallet_referenda::TrackInfo { + name: "promote to II Dan", + max_deciding: PROMOTE_MAX_DECIDING, 
+ decision_deposit: PROMOTE_DECISION_DEPOSIT, + prepare_period: PROMOTE_PREPARE_PERIOD, + decision_period: PROMOTE_DECISION_PERIOD, + confirm_period: PROMOTE_CONFIRM_PERIOD, + min_enactment_period: PROMOTE_MIN_ENACTMENT_PERIOD, + min_approval: PROMOTE_MIN_APPROVAL, + min_support: PROMOTE_MIN_SUPPORT, + }, + ), + ( + tracks::PROMOTE_TO_3DAN, + pallet_referenda::TrackInfo { + name: "promote to III Dan", + max_deciding: PROMOTE_MAX_DECIDING, + decision_deposit: PROMOTE_DECISION_DEPOSIT, + prepare_period: PROMOTE_PREPARE_PERIOD, + decision_period: PROMOTE_DECISION_PERIOD, + confirm_period: PROMOTE_CONFIRM_PERIOD, + min_enactment_period: PROMOTE_MIN_ENACTMENT_PERIOD, + min_approval: PROMOTE_MIN_APPROVAL, + min_support: PROMOTE_MIN_SUPPORT, + }, + ), + ( + tracks::PROMOTE_TO_4DAN, + pallet_referenda::TrackInfo { + name: "promote to IV Dan", + max_deciding: PROMOTE_MAX_DECIDING, + decision_deposit: PROMOTE_DECISION_DEPOSIT, + prepare_period: PROMOTE_PREPARE_PERIOD, + decision_period: PROMOTE_DECISION_PERIOD, + confirm_period: PROMOTE_CONFIRM_PERIOD, + min_enactment_period: PROMOTE_MIN_ENACTMENT_PERIOD, + min_approval: PROMOTE_MIN_APPROVAL, + min_support: PROMOTE_MIN_SUPPORT, + }, + ), + ( + tracks::PROMOTE_TO_5DAN, + pallet_referenda::TrackInfo { + name: "promote to V Dan", + max_deciding: PROMOTE_MAX_DECIDING, + decision_deposit: PROMOTE_DECISION_DEPOSIT, + prepare_period: PROMOTE_PREPARE_PERIOD, + decision_period: PROMOTE_DECISION_PERIOD, + confirm_period: PROMOTE_CONFIRM_PERIOD, + min_enactment_period: PROMOTE_MIN_ENACTMENT_PERIOD, + min_approval: PROMOTE_MIN_APPROVAL, + min_support: PROMOTE_MIN_SUPPORT, + }, + ), + ( + tracks::PROMOTE_TO_6DAN, + pallet_referenda::TrackInfo { + name: "promote to VI Dan", + max_deciding: PROMOTE_MAX_DECIDING, + decision_deposit: PROMOTE_DECISION_DEPOSIT, + prepare_period: PROMOTE_PREPARE_PERIOD, + decision_period: PROMOTE_DECISION_PERIOD, + confirm_period: PROMOTE_CONFIRM_PERIOD, + min_enactment_period: PROMOTE_MIN_ENACTMENT_PERIOD, + min_approval: PROMOTE_MIN_APPROVAL, + min_support: PROMOTE_MIN_SUPPORT, + }, + ), + ]; + &DATA[..] + } + fn track_for(id: &Self::RuntimeOrigin) -> Result { + use super::origins::Origin; + use constants as tracks; + + #[cfg(feature = "runtime-benchmarks")] + { + // For benchmarks, we enable a root origin. + // It is important that this is not available in production! 
+ let root: Self::RuntimeOrigin = frame_system::RawOrigin::Root.into(); + if &root == id { + return Ok(tracks::GRAND_MASTERS) + } + } + + match Origin::try_from(id.clone()) { + Ok(Origin::Members) => Ok(tracks::MEMBERS), + Ok(Origin::Fellowship2Dan) => Ok(tracks::PROFICIENTS), + Ok(Origin::Fellows) => Ok(tracks::FELLOWS), + Ok(Origin::Architects) => Ok(tracks::ARCHITECTS), + Ok(Origin::Fellowship5Dan) => Ok(tracks::ARCHITECTS_ADEPT), + Ok(Origin::Fellowship6Dan) => Ok(tracks::GRAND_ARCHITECTS), + Ok(Origin::Masters) => Ok(tracks::MASTERS), + Ok(Origin::Fellowship8Dan) => Ok(tracks::MASTERS_CONSTANT), + Ok(Origin::Fellowship9Dan) => Ok(tracks::GRAND_MASTERS), + + Ok(Origin::RetainAt1Dan) => Ok(tracks::RETAIN_AT_1DAN), + Ok(Origin::RetainAt2Dan) => Ok(tracks::RETAIN_AT_2DAN), + Ok(Origin::RetainAt3Dan) => Ok(tracks::RETAIN_AT_3DAN), + Ok(Origin::RetainAt4Dan) => Ok(tracks::RETAIN_AT_4DAN), + Ok(Origin::RetainAt5Dan) => Ok(tracks::RETAIN_AT_5DAN), + Ok(Origin::RetainAt6Dan) => Ok(tracks::RETAIN_AT_6DAN), + + Ok(Origin::PromoteTo1Dan) => Ok(tracks::PROMOTE_TO_1DAN), + Ok(Origin::PromoteTo2Dan) => Ok(tracks::PROMOTE_TO_2DAN), + Ok(Origin::PromoteTo3Dan) => Ok(tracks::PROMOTE_TO_3DAN), + Ok(Origin::PromoteTo4Dan) => Ok(tracks::PROMOTE_TO_4DAN), + Ok(Origin::PromoteTo5Dan) => Ok(tracks::PROMOTE_TO_5DAN), + Ok(Origin::PromoteTo6Dan) => Ok(tracks::PROMOTE_TO_6DAN), + + _ => Err(()), + } + } +} +pallet_referenda::impl_tracksinfo_get!(TracksInfo, Balance, BlockNumber); diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/impls.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/impls.rs new file mode 100644 index 00000000000..9f4c2a6a4c9 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/impls.rs @@ -0,0 +1,229 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::OriginCaller; +use frame_support::{ + dispatch::DispatchResultWithPostInfo, + traits::{Currency, Get, Imbalance, OnUnbalanced, OriginTrait, PrivilegeCmp}, + weights::Weight, +}; +use log; +use pallet_alliance::{ProposalIndex, ProposalProvider}; +use parachains_common::impls::NegativeImbalance; +use sp_runtime::DispatchError; +use sp_std::{cmp::Ordering, marker::PhantomData, prelude::*}; +use xcm::latest::{Fungibility, Junction, Parent}; + +type AccountIdOf<T> = <T as frame_system::Config>::AccountId; + +type ProposalOf<T, I> = <T as pallet_collective::Config<I>>::Proposal; + +type HashOf<T> = <T as frame_system::Config>::Hash; + +/// Type alias to conveniently refer to the `Currency::Balance` associated type. +pub type BalanceOf<T> = + <pallet_balances::Pallet<T> as Currency<<T as frame_system::Config>::AccountId>>::Balance; + +/// Implements `OnUnbalanced::on_unbalanced` to teleport slashed assets to relay chain treasury +/// account. 
+pub struct ToParentTreasury( + PhantomData<(TreasuryAccount, PalletAccount, T)>, +); + +impl OnUnbalanced> + for ToParentTreasury +where + T: pallet_balances::Config + pallet_xcm::Config + frame_system::Config, + <::RuntimeOrigin as OriginTrait>::AccountId: From>, + [u8; 32]: From<::AccountId>, + TreasuryAccount: Get>, + PalletAccount: Get>, + BalanceOf: Into, +{ + fn on_unbalanced(amount: NegativeImbalance) { + let amount = match amount.drop_zero() { + Ok(..) => return, + Err(amount) => amount, + }; + let imbalance = amount.peek(); + let pallet_acc: AccountIdOf = PalletAccount::get(); + let treasury_acc: AccountIdOf = TreasuryAccount::get(); + + >::resolve_creating(&pallet_acc, amount); + + let result = >::teleport_assets( + <::RuntimeOrigin>::signed(pallet_acc.into()), + Box::new(Parent.into()), + Box::new( + Junction::AccountId32 { network: None, id: treasury_acc.into() } + .into_location() + .into(), + ), + Box::new((Parent, imbalance).into()), + 0, + ); + + if let Err(err) = result { + log::warn!("Failed to teleport slashed assets: {:?}", err); + } + } +} + +/// Proposal provider for alliance pallet. +/// Adapter from collective pallet to alliance proposal provider trait. +pub struct AllianceProposalProvider(PhantomData<(T, I)>); + +impl ProposalProvider, HashOf, ProposalOf> + for AllianceProposalProvider +where + T: pallet_collective::Config + frame_system::Config, + I: 'static, +{ + fn propose_proposal( + who: AccountIdOf, + threshold: u32, + proposal: Box>, + length_bound: u32, + ) -> Result<(u32, u32), DispatchError> { + pallet_collective::Pallet::::do_propose_proposed( + who, + threshold, + proposal, + length_bound, + ) + } + + fn vote_proposal( + who: AccountIdOf, + proposal: HashOf, + index: ProposalIndex, + approve: bool, + ) -> Result { + pallet_collective::Pallet::::do_vote(who, proposal, index, approve) + } + + fn close_proposal( + proposal_hash: HashOf, + proposal_index: ProposalIndex, + proposal_weight_bound: Weight, + length_bound: u32, + ) -> DispatchResultWithPostInfo { + pallet_collective::Pallet::::do_close( + proposal_hash, + proposal_index, + proposal_weight_bound, + length_bound, + ) + } + + fn proposal_of(proposal_hash: HashOf) -> Option> { + pallet_collective::Pallet::::proposal_of(proposal_hash) + } +} + +/// Used to compare the privilege of an origin inside the scheduler. +pub struct EqualOrGreatestRootCmp; + +impl PrivilegeCmp for EqualOrGreatestRootCmp { + fn cmp_privilege(left: &OriginCaller, right: &OriginCaller) -> Option { + if left == right { + return Some(Ordering::Equal) + } + match (left, right) { + // Root is greater than anything. + (OriginCaller::system(frame_system::RawOrigin::Root), _) => Some(Ordering::Greater), + _ => None, + } + } +} + +#[cfg(feature = "runtime-benchmarks")] +pub mod benchmarks { + use super::*; + use crate::ParachainSystem; + use cumulus_primitives_core::{ChannelStatus, GetChannelInfo}; + use frame_support::traits::{ + fungible, + tokens::{Pay, PaymentStatus}, + }; + use pallet_ranked_collective::Rank; + use parachains_common::{AccountId, Balance}; + use sp_runtime::traits::Convert; + + /// Rank to salary conversion helper type. + pub struct RankToSalary(PhantomData); + impl Convert for RankToSalary + where + Fungible: fungible::Inspect, + { + fn convert(r: Rank) -> Balance { + Balance::from(r).saturating_mul(Fungible::minimum_balance()) + } + } + + /// Trait for setting up any prerequisites for successful execution of benchmarks. 
+ pub trait EnsureSuccessful { + fn ensure_successful(); + } + + /// Implementation of the [`EnsureSuccessful`] trait which opens an HRMP channel between + /// the Collectives and a parachain with a given ID. + pub struct OpenHrmpChannel(PhantomData); + impl> EnsureSuccessful for OpenHrmpChannel { + fn ensure_successful() { + if let ChannelStatus::Closed = ParachainSystem::get_channel_status(I::get().into()) { + ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests(I::get().into()) + } + } + } + + /// Type that wraps a type implementing the [`Pay`] trait to decorate its + /// [`Pay::ensure_successful`] function with a provided implementation of the + /// [`EnsureSuccessful`] trait. + pub struct PayWithEnsure(PhantomData<(O, E)>); + impl Pay for PayWithEnsure + where + O: Pay, + E: EnsureSuccessful, + { + type AssetKind = O::AssetKind; + type Balance = O::Balance; + type Beneficiary = O::Beneficiary; + type Error = O::Error; + type Id = O::Id; + + fn pay( + who: &Self::Beneficiary, + asset_kind: Self::AssetKind, + amount: Self::Balance, + ) -> Result { + O::pay(who, asset_kind, amount) + } + fn check_payment(id: Self::Id) -> PaymentStatus { + O::check_payment(id) + } + fn ensure_successful( + who: &Self::Beneficiary, + asset_kind: Self::AssetKind, + amount: Self::Balance, + ) { + E::ensure_successful(); + O::ensure_successful(who, asset_kind, amount) + } + fn ensure_concluded(id: Self::Id) { + O::ensure_concluded(id) + } + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs new file mode 100644 index 00000000000..8c5593e154d --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -0,0 +1,1023 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Collectives Parachain +//! +//! This parachain is for collectives that serve the Westend network. +//! Each collective is defined by a specialized (possibly instanced) pallet. +//! +//! ### Governance +//! +//! As a system parachain, Collectives defers its governance (namely, its `Root` origin), to +//! its Relay Chain parent, Westend. +//! +//! ### Collator Selection +//! +//! Collectives uses `pallet-collator-selection`, a simple first-come-first-served registration +//! system where collators can reserve a small bond to join the block producer set. There is no +//! slashing. Collective members are generally expected to run collators. + +#![cfg_attr(not(feature = "std"), no_std)] +#![recursion_limit = "256"] + +// Make the WASM binary available. +#[cfg(feature = "std")] +include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); + +pub mod ambassador; +pub mod impls; +mod weights; +pub mod xcm_config; +// Fellowship configurations. 
+pub mod fellowship; +pub use ambassador::pallet_ambassador_origins; + +use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; +use fellowship::{pallet_fellowship_origins, Fellows}; +use impls::{AllianceProposalProvider, EqualOrGreatestRootCmp, ToParentTreasury}; +use sp_api::impl_runtime_apis; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; +use sp_runtime::{ + create_runtime_str, generic, impl_opaque_keys, + traits::{AccountIdConversion, AccountIdLookup, BlakeTwo256, Block as BlockT}, + transaction_validity::{TransactionSource, TransactionValidity}, + ApplyExtrinsicResult, Perbill, +}; + +use sp_std::prelude::*; +#[cfg(feature = "std")] +use sp_version::NativeVersion; +use sp_version::RuntimeVersion; + +use codec::{Decode, Encode, MaxEncodedLen}; +use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; +use frame_support::{ + construct_runtime, + dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, + parameter_types, + traits::{ + fungible::HoldConsideration, ConstBool, ConstU16, ConstU32, ConstU64, ConstU8, + EitherOfDiverse, InstanceFilter, LinearStoragePrice, TransformOrigin, + }, + weights::{ConstantMultiplier, Weight}, + PalletId, +}; +use frame_system::{ + limits::{BlockLength, BlockWeights}, + EnsureRoot, +}; +pub use parachains_common as common; +use parachains_common::{ + impls::DealWithFees, message_queue::*, AccountId, AuraId, Balance, BlockNumber, Hash, Header, + Nonce, Signature, AVERAGE_ON_INITIALIZE_RATIO, DAYS, HOURS, MAXIMUM_BLOCK_WEIGHT, MINUTES, + NORMAL_DISPATCH_RATIO, SLOT_DURATION, +}; +use sp_runtime::RuntimeDebug; +use testnets_common::westend::{account::*, consensus::*, currency::*, fee::WeightToFee}; +use xcm_config::{GovernanceLocation, XcmOriginToTransactDispatchOrigin}; + +#[cfg(any(feature = "std", test))] +pub use sp_runtime::BuildStorage; + +// Polkadot imports +use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; +use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; +use xcm::latest::{prelude::*, BodyId}; + +use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; + +impl_opaque_keys! { + pub struct SessionKeys { + pub aura: Aura, + } +} + +#[sp_version::runtime_version] +pub const VERSION: RuntimeVersion = RuntimeVersion { + spec_name: create_runtime_str!("collectives-westend"), + impl_name: create_runtime_str!("collectives-westend"), + authoring_version: 1, + spec_version: 1_003_000, + impl_version: 0, + apis: RUNTIME_API_VERSIONS, + transaction_version: 5, + state_version: 0, +}; + +/// The version information used to identify this runtime when compiled natively. +#[cfg(feature = "std")] +pub fn native_version() -> NativeVersion { + NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } +} + +/// Privileged origin that represents Root or more than two thirds of the Alliance. +pub type RootOrAllianceTwoThirdsMajority = EitherOfDiverse< + EnsureRoot, + pallet_collective::EnsureProportionMoreThan, +>; + +parameter_types! 
{ + pub const Version: RuntimeVersion = VERSION; + pub RuntimeBlockLength: BlockLength = + BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); + pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() + .base_block(BlockExecutionWeight::get()) + .for_class(DispatchClass::all(), |weights| { + weights.base_extrinsic = ExtrinsicBaseWeight::get(); + }) + .for_class(DispatchClass::Normal, |weights| { + weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); + }) + .for_class(DispatchClass::Operational, |weights| { + weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); + // Operational transactions have some extra reserved space, so that they + // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. + weights.reserved = Some( + MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT + ); + }) + .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) + .build_or_panic(); +} + +// Configure FRAME pallets to include in runtime. +impl frame_system::Config for Runtime { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = RuntimeBlockWeights; + type BlockLength = RuntimeBlockLength; + type AccountId = AccountId; + type RuntimeCall = RuntimeCall; + type Lookup = AccountIdLookup; + type Nonce = Nonce; + type Hash = Hash; + type Hashing = BlakeTwo256; + type Block = Block; + type RuntimeEvent = RuntimeEvent; + type RuntimeOrigin = RuntimeOrigin; + type BlockHashCount = BlockHashCount; + type DbWeight = RocksDbWeight; + type Version = Version; + type PalletInfo = PalletInfo; + type OnNewAccount = (); + type OnKilledAccount = (); + type AccountData = pallet_balances::AccountData; + type SystemWeightInfo = weights::frame_system::WeightInfo; + type SS58Prefix = ConstU16<0>; + type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +impl pallet_timestamp::Config for Runtime { + /// A timestamp: milliseconds since the unix epoch. + type Moment = u64; + type OnTimestampSet = Aura; + type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>; + type WeightInfo = weights::pallet_timestamp::WeightInfo; +} + +impl pallet_authorship::Config for Runtime { + type FindAuthor = pallet_session::FindAccountFromAuthorIndex; + type EventHandler = (CollatorSelection,); +} + +parameter_types! { + pub const ExistentialDeposit: Balance = EXISTENTIAL_DEPOSIT; +} + +impl pallet_balances::Config for Runtime { + type MaxLocks = ConstU32<50>; + /// The type for recording an account's balance. + type Balance = Balance; + /// The ubiquitous event type. + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = weights::pallet_balances::WeightInfo; + type MaxReserves = ConstU32<50>; + type ReserveIdentifier = [u8; 8]; + type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; + type FreezeIdentifier = (); + type MaxHolds = ConstU32<1>; + type MaxFreezes = ConstU32<0>; +} + +parameter_types! 
{ + /// Relay Chain `TransactionByteFee` / 10 + pub const TransactionByteFee: Balance = MILLICENTS; +} + +impl pallet_transaction_payment::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type OnChargeTransaction = + pallet_transaction_payment::CurrencyAdapter>; + type WeightToFee = WeightToFee; + type LengthToFee = ConstantMultiplier; + type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type OperationalFeeMultiplier = ConstU8<5>; +} + +parameter_types! { + // One storage item; key size is 32; value is size 4+4+16+32 bytes = 56 bytes. + pub const DepositBase: Balance = deposit(1, 88); + // Additional storage item size of 32 bytes. + pub const DepositFactor: Balance = deposit(0, 32); +} + +impl pallet_multisig::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type Currency = Balances; + type DepositBase = DepositBase; + type DepositFactor = DepositFactor; + type MaxSignatories = ConstU32<100>; + type WeightInfo = weights::pallet_multisig::WeightInfo; +} + +impl pallet_utility::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type PalletsOrigin = OriginCaller; + type WeightInfo = weights::pallet_utility::WeightInfo; +} + +parameter_types! { + // One storage item; key size 32, value size 8; . + pub const ProxyDepositBase: Balance = deposit(1, 40); + // Additional storage item size of 33 bytes. + pub const ProxyDepositFactor: Balance = deposit(0, 33); + // One storage item; key size 32, value size 16 + pub const AnnouncementDepositBase: Balance = deposit(1, 48); + pub const AnnouncementDepositFactor: Balance = deposit(0, 66); +} + +/// The type used to represent the kinds of proxying allowed. +#[derive( + Copy, + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Encode, + Decode, + RuntimeDebug, + MaxEncodedLen, + scale_info::TypeInfo, +)] +pub enum ProxyType { + /// Fully permissioned proxy. Can execute any call on behalf of _proxied_. + Any, + /// Can execute any call that does not transfer funds. + NonTransfer, + /// Proxy with the ability to reject time-delay proxy announcements. + CancelProxy, + /// Collator selection proxy. Can execute calls related to collator selection mechanism. + Collator, + /// Alliance proxy. Allows calls related to the Alliance. + Alliance, + /// Fellowship proxy. Allows calls related to the Fellowship. + Fellowship, + /// Ambassador proxy. Allows calls related to the Ambassador Program. + Ambassador, +} +impl Default for ProxyType { + fn default() -> Self { + Self::Any + } +} +impl InstanceFilter for ProxyType { + fn filter(&self, c: &RuntimeCall) -> bool { + match self { + ProxyType::Any => true, + ProxyType::NonTransfer => !matches!(c, RuntimeCall::Balances { .. }), + ProxyType::CancelProxy => matches!( + c, + RuntimeCall::Proxy(pallet_proxy::Call::reject_announcement { .. }) | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } + ), + ProxyType::Collator => matches!( + c, + RuntimeCall::CollatorSelection { .. } | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } + ), + ProxyType::Alliance => matches!( + c, + RuntimeCall::AllianceMotion { .. } | + RuntimeCall::Alliance { .. } | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } + ), + ProxyType::Fellowship => matches!( + c, + RuntimeCall::FellowshipCollective { .. } | + RuntimeCall::FellowshipReferenda { .. } | + RuntimeCall::FellowshipCore { .. } | + RuntimeCall::FellowshipSalary { .. } | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. 
} + ), + ProxyType::Ambassador => matches!( + c, + RuntimeCall::AmbassadorCollective { .. } | + RuntimeCall::AmbassadorReferenda { .. } | + RuntimeCall::AmbassadorContent { .. } | + RuntimeCall::AmbassadorCore { .. } | + RuntimeCall::AmbassadorSalary { .. } | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } + ), + } + } + fn is_superset(&self, o: &Self) -> bool { + match (self, o) { + (x, y) if x == y => true, + (ProxyType::Any, _) => true, + (_, ProxyType::Any) => false, + (ProxyType::NonTransfer, _) => true, + _ => false, + } + } +} + +impl pallet_proxy::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type Currency = Balances; + type ProxyType = ProxyType; + type ProxyDepositBase = ProxyDepositBase; + type ProxyDepositFactor = ProxyDepositFactor; + type MaxProxies = ConstU32<32>; + type WeightInfo = weights::pallet_proxy::WeightInfo; + type MaxPending = ConstU32<32>; + type CallHasher = BlakeTwo256; + type AnnouncementDepositBase = AnnouncementDepositBase; + type AnnouncementDepositFactor = AnnouncementDepositFactor; +} + +parameter_types! { + pub const ReservedXcmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); + pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); +} + +impl cumulus_pallet_parachain_system::Config for Runtime { + type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; + type RuntimeEvent = RuntimeEvent; + type OnSystemEvent = (); + type SelfParaId = parachain_info::Pallet; + type DmpQueue = frame_support::traits::EnqueueWithOrigin; + type ReservedDmpWeight = ReservedDmpWeight; + type OutboundXcmpMessageSource = XcmpQueue; + type XcmpMessageHandler = XcmpQueue; + type ReservedXcmpWeight = ReservedXcmpWeight; + type CheckAssociatedRelayNumber = RelayNumberStrictlyIncreases; + type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< + Runtime, + RELAY_CHAIN_SLOT_DURATION_MILLIS, + BLOCK_PROCESSING_VELOCITY, + UNINCLUDED_SEGMENT_CAPACITY, + >; +} + +impl parachain_info::Config for Runtime {} + +parameter_types! { + pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block; +} + +impl pallet_message_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = weights::pallet_message_queue::WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor< + cumulus_primitives_core::AggregateMessageOrigin, + >; + #[cfg(not(feature = "runtime-benchmarks"))] + type MessageProcessor = xcm_builder::ProcessXcmMessage< + AggregateMessageOrigin, + xcm_executor::XcmExecutor, + RuntimeCall, + >; + type Size = u32; + // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: + type QueueChangeHandler = NarrowOriginToSibling; + type QueuePausedQuery = NarrowOriginToSibling; + type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type MaxStale = sp_core::ConstU32<8>; + type ServiceWeight = MessageQueueServiceWeight; +} + +impl cumulus_pallet_aura_ext::Config for Runtime {} + +parameter_types! { + /// The asset ID for the asset that we use to pay for message delivery fees. + pub FeeAssetId: AssetId = Concrete(xcm_config::WndLocation::get()); + /// The base fee for the message delivery fees. 
+ pub const BaseDeliveryFee: u128 = CENTS.saturating_mul(3); +} + +pub type PriceForSiblingParachainDelivery = polkadot_runtime_common::xcm_sender::ExponentialPrice< + FeeAssetId, + BaseDeliveryFee, + TransactionByteFee, + XcmpQueue, +>; + +impl cumulus_pallet_xcmp_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type ChannelInfo = ParachainSystem; + type VersionWrapper = PolkadotXcm; + // Enqueue XCMP messages from siblings for later processing. + type XcmpQueue = TransformOrigin; + type MaxInboundSuspended = sp_core::ConstU32<1_000>; + type ControllerOrigin = EitherOfDiverse, Fellows>; + type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; + type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo; + type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; +} + +parameter_types! { + pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; +} + +impl cumulus_pallet_dmp_queue::Config for Runtime { + type WeightInfo = weights::cumulus_pallet_dmp_queue::WeightInfo; + type RuntimeEvent = RuntimeEvent; + type DmpSink = frame_support::traits::EnqueueWithOrigin; +} + +pub const PERIOD: u32 = 6 * HOURS; +pub const OFFSET: u32 = 0; + +impl pallet_session::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type ValidatorId = ::AccountId; + // we don't have stash and controller, thus we don't need the convert as well. + type ValidatorIdOf = pallet_collator_selection::IdentityCollator; + type ShouldEndSession = pallet_session::PeriodicSessions, ConstU32>; + type NextSessionRotation = pallet_session::PeriodicSessions, ConstU32>; + type SessionManager = CollatorSelection; + // Essentially just Aura, but let's be pedantic. + type SessionHandler = ::KeyTypeIdProviders; + type Keys = SessionKeys; + type WeightInfo = weights::pallet_session::WeightInfo; +} + +impl pallet_aura::Config for Runtime { + type AuthorityId = AuraId; + type DisabledValidators = (); + type MaxAuthorities = ConstU32<100_000>; + type AllowMultipleBlocksPerSlot = ConstBool; + #[cfg(feature = "experimental")] + type SlotDuration = pallet_aura::MinimumPeriodTimesTwo; +} + +parameter_types! { + pub const PotId: PalletId = PalletId(*b"PotStake"); + pub const SessionLength: BlockNumber = 6 * HOURS; + // `StakingAdmin` pluralistic body. + pub const StakingAdminBodyId: BodyId = BodyId::Defense; +} + +/// We allow root and the `StakingAdmin` to execute privileged collator selection operations. +pub type CollatorSelectionUpdateOrigin = EitherOfDiverse< + EnsureRoot, + EnsureXcm>, +>; + +impl pallet_collator_selection::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type UpdateOrigin = CollatorSelectionUpdateOrigin; + type PotId = PotId; + type MaxCandidates = ConstU32<100>; + type MinEligibleCollators = ConstU32<4>; + type MaxInvulnerables = ConstU32<20>; + // should be a multiple of session or things will get inconsistent + type KickThreshold = ConstU32; + type ValidatorId = ::AccountId; + type ValidatorIdOf = pallet_collator_selection::IdentityCollator; + type ValidatorRegistration = Session; + type WeightInfo = weights::pallet_collator_selection::WeightInfo; +} + +pub const ALLIANCE_MOTION_DURATION: BlockNumber = 5 * DAYS; + +parameter_types! 
{ + pub const AllianceMotionDuration: BlockNumber = ALLIANCE_MOTION_DURATION; + pub MaxProposalWeight: Weight = Perbill::from_percent(50) * RuntimeBlockWeights::get().max_block; +} +pub const ALLIANCE_MAX_PROPOSALS: u32 = 100; +pub const ALLIANCE_MAX_MEMBERS: u32 = 100; + +type AllianceCollective = pallet_collective::Instance1; +impl pallet_collective::Config for Runtime { + type RuntimeOrigin = RuntimeOrigin; + type Proposal = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type MotionDuration = AllianceMotionDuration; + type MaxProposals = ConstU32; + type MaxMembers = ConstU32; + type DefaultVote = pallet_collective::MoreThanMajorityThenPrimeDefaultVote; + type SetMembersOrigin = EnsureRoot; + type WeightInfo = weights::pallet_collective::WeightInfo; + type MaxProposalWeight = MaxProposalWeight; +} + +pub const MAX_FELLOWS: u32 = ALLIANCE_MAX_MEMBERS; +pub const MAX_ALLIES: u32 = 100; + +parameter_types! { + pub const AllyDeposit: Balance = 1_000 * UNITS; // 1,000 WND bond to join as an Ally + // The Alliance pallet account, used as a temporary place to deposit a slashed imbalance + // before the teleport to the Treasury. + pub AlliancePalletAccount: AccountId = ALLIANCE_PALLET_ID.into_account_truncating(); + pub WestendTreasuryAccount: AccountId = WESTEND_TREASURY_PALLET_ID.into_account_truncating(); + // The number of blocks a member must wait between giving a retirement notice and retiring. + // Supposed to be greater than time required to `kick_member` with alliance motion. + pub const AllianceRetirementPeriod: BlockNumber = (90 * DAYS) + ALLIANCE_MOTION_DURATION; +} + +impl pallet_alliance::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Proposal = RuntimeCall; + type AdminOrigin = RootOrAllianceTwoThirdsMajority; + type MembershipManager = RootOrAllianceTwoThirdsMajority; + type AnnouncementOrigin = RootOrAllianceTwoThirdsMajority; + type Currency = Balances; + type Slashed = ToParentTreasury; + type InitializeMembers = AllianceMotion; + type MembershipChanged = AllianceMotion; + type RetirementPeriod = AllianceRetirementPeriod; + type IdentityVerifier = (); // Don't block accounts on identity criteria + type ProposalProvider = AllianceProposalProvider; + type MaxProposals = ConstU32; + type MaxFellows = ConstU32; + type MaxAllies = ConstU32; + type MaxUnscrupulousItems = ConstU32<100>; + type MaxWebsiteUrlLength = ConstU32<255>; + type MaxAnnouncementsCount = ConstU32<100>; + type MaxMembersCount = ConstU32; + type AllyDeposit = AllyDeposit; + type WeightInfo = weights::pallet_alliance::WeightInfo; +} + +parameter_types! { + pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * RuntimeBlockWeights::get().max_block; +} + +#[cfg(not(feature = "runtime-benchmarks"))] +parameter_types! { + pub const MaxScheduledPerBlock: u32 = 50; +} + +#[cfg(feature = "runtime-benchmarks")] +parameter_types! { + pub const MaxScheduledPerBlock: u32 = 200; +} + +impl pallet_scheduler::Config for Runtime { + type RuntimeOrigin = RuntimeOrigin; + type RuntimeEvent = RuntimeEvent; + type PalletsOrigin = OriginCaller; + type RuntimeCall = RuntimeCall; + type MaximumWeight = MaximumSchedulerWeight; + type ScheduleOrigin = EnsureRoot; + type MaxScheduledPerBlock = MaxScheduledPerBlock; + type WeightInfo = weights::pallet_scheduler::WeightInfo; + type OriginPrivilegeCmp = EqualOrGreatestRootCmp; + type Preimages = Preimage; +} + +parameter_types! 
{ + pub const PreimageBaseDeposit: Balance = deposit(2, 64); + pub const PreimageByteDeposit: Balance = deposit(0, 1); + pub const PreimageHoldReason: RuntimeHoldReason = RuntimeHoldReason::Preimage(pallet_preimage::HoldReason::Preimage); +} + +impl pallet_preimage::Config for Runtime { + type WeightInfo = weights::pallet_preimage::WeightInfo; + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type ManagerOrigin = EnsureRoot; + type Consideration = HoldConsideration< + AccountId, + Balances, + PreimageHoldReason, + LinearStoragePrice, + >; +} + +// Create the runtime by composing the FRAME pallets that were previously configured. +construct_runtime!( + pub enum Runtime + { + // System support stuff. + System: frame_system::{Pallet, Call, Config, Storage, Event} = 0, + ParachainSystem: cumulus_pallet_parachain_system::{ + Pallet, Call, Config, Storage, Inherent, Event, ValidateUnsigned, + } = 1, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent} = 2, + ParachainInfo: parachain_info::{Pallet, Storage, Config} = 3, + + // Monetary stuff. + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event} = 10, + TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event} = 11, + + // Collator support. the order of these 5 are important and shall not change. + Authorship: pallet_authorship::{Pallet, Storage} = 20, + CollatorSelection: pallet_collator_selection::{Pallet, Call, Storage, Event, Config} = 21, + Session: pallet_session::{Pallet, Call, Storage, Event, Config} = 22, + Aura: pallet_aura::{Pallet, Storage, Config} = 23, + AuraExt: cumulus_pallet_aura_ext::{Pallet, Storage, Config} = 24, + + // XCM helpers. + XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 30, + PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 31, + CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, + DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, + MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 34, + + // Handy utilities. + Utility: pallet_utility::{Pallet, Call, Event} = 40, + Multisig: pallet_multisig::{Pallet, Call, Storage, Event} = 41, + Proxy: pallet_proxy::{Pallet, Call, Storage, Event} = 42, + Preimage: pallet_preimage::{Pallet, Call, Storage, Event, HoldReason} = 43, + Scheduler: pallet_scheduler::{Pallet, Call, Storage, Event} = 44, + + // The main stage. + + // The Alliance. + Alliance: pallet_alliance::{Pallet, Call, Storage, Event, Config} = 50, + AllianceMotion: pallet_collective::::{Pallet, Call, Storage, Origin, Event, Config} = 51, + + // The Fellowship. + // pub type FellowshipCollectiveInstance = pallet_ranked_collective::Instance1; + FellowshipCollective: pallet_ranked_collective::::{Pallet, Call, Storage, Event} = 60, + // pub type FellowshipReferendaInstance = pallet_referenda::Instance1; + FellowshipReferenda: pallet_referenda::::{Pallet, Call, Storage, Event} = 61, + FellowshipOrigins: pallet_fellowship_origins::{Origin} = 62, + // pub type FellowshipCoreInstance = pallet_core_fellowship::Instance1; + FellowshipCore: pallet_core_fellowship::::{Pallet, Call, Storage, Event} = 63, + // pub type FellowshipSalaryInstance = pallet_salary::Instance1; + FellowshipSalary: pallet_salary::::{Pallet, Call, Storage, Event} = 64, + + // Ambassador Program. 
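+ // Ranked collective, referenda, origins, core fellowship, salary and content pallets for the Ambassador Program, at indices 70 through 75.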
+ AmbassadorCollective: pallet_ranked_collective::::{Pallet, Call, Storage, Event} = 70, + AmbassadorReferenda: pallet_referenda::::{Pallet, Call, Storage, Event} = 71, + AmbassadorOrigins: pallet_ambassador_origins::{Origin} = 72, + AmbassadorCore: pallet_core_fellowship::::{Pallet, Call, Storage, Event} = 73, + AmbassadorSalary: pallet_salary::::{Pallet, Call, Storage, Event} = 74, + AmbassadorContent: pallet_collective_content::::{Pallet, Call, Storage, Event} = 75, + } +); + +/// The address format for describing accounts. +pub type Address = sp_runtime::MultiAddress; +/// Block type as expected by this runtime. +pub type Block = generic::Block; +/// A Block signed with a Justification +pub type SignedBlock = generic::SignedBlock; +/// BlockId type as expected by this runtime. +pub type BlockId = generic::BlockId; +/// The SignedExtension to the basic transaction logic. +pub type SignedExtra = ( + frame_system::CheckNonZeroSender, + frame_system::CheckSpecVersion, + frame_system::CheckTxVersion, + frame_system::CheckGenesis, + frame_system::CheckEra, + frame_system::CheckNonce, + frame_system::CheckWeight, +); +/// Unchecked extrinsic type as expected by this runtime. +pub type UncheckedExtrinsic = + generic::UncheckedExtrinsic; +/// All migrations executed on runtime upgrade as a nested tuple of types implementing +/// `OnRuntimeUpgrade`. Included migrations must be idempotent. +type Migrations = ( + // unreleased + pallet_collator_selection::migration::v1::MigrateToV1, +); + +/// Executive: handles dispatch to the various modules. +pub type Executive = frame_executive::Executive< + Runtime, + Block, + frame_system::ChainContext, + Runtime, + AllPalletsWithSystem, + Migrations, +>; + +#[cfg(feature = "runtime-benchmarks")] +mod benches { + frame_benchmarking::define_benchmarks!( + [frame_system, SystemBench::] + [pallet_balances, Balances] + [pallet_message_queue, MessageQueue] + [pallet_multisig, Multisig] + [pallet_proxy, Proxy] + [pallet_session, SessionBench::] + [pallet_utility, Utility] + [pallet_timestamp, Timestamp] + [pallet_collator_selection, CollatorSelection] + [cumulus_pallet_parachain_system, ParachainSystem] + [cumulus_pallet_xcmp_queue, XcmpQueue] + [cumulus_pallet_dmp_queue, DmpQueue] + [pallet_alliance, Alliance] + [pallet_collective, AllianceMotion] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] + [pallet_preimage, Preimage] + [pallet_scheduler, Scheduler] + [pallet_referenda, FellowshipReferenda] + [pallet_ranked_collective, FellowshipCollective] + [pallet_core_fellowship, FellowshipCore] + [pallet_salary, FellowshipSalary] + [pallet_referenda, AmbassadorReferenda] + [pallet_ranked_collective, AmbassadorCollective] + [pallet_collective_content, AmbassadorContent] + [pallet_core_fellowship, AmbassadorCore] + [pallet_salary, AmbassadorSalary] + ); +} + +impl_runtime_apis! 
{ + impl sp_consensus_aura::AuraApi for Runtime { + fn slot_duration() -> sp_consensus_aura::SlotDuration { + sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) + } + + fn authorities() -> Vec { + Aura::authorities().into_inner() + } + } + + impl sp_api::Core for Runtime { + fn version() -> RuntimeVersion { + VERSION + } + + fn execute_block(block: Block) { + Executive::execute_block(block) + } + + fn initialize_block(header: &::Header) { + Executive::initialize_block(header) + } + } + + impl sp_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + OpaqueMetadata::new(Runtime::metadata().into()) + } + + fn metadata_at_version(version: u32) -> Option { + Runtime::metadata_at_version(version) + } + + fn metadata_versions() -> sp_std::vec::Vec { + Runtime::metadata_versions() + } + } + + impl sp_block_builder::BlockBuilder for Runtime { + fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { + Executive::apply_extrinsic(extrinsic) + } + + fn finalize_block() -> ::Header { + Executive::finalize_block() + } + + fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { + data.create_extrinsics() + } + + fn check_inherents( + block: Block, + data: sp_inherents::InherentData, + ) -> sp_inherents::CheckInherentsResult { + data.check_extrinsics(&block) + } + } + + impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { + fn validate_transaction( + source: TransactionSource, + tx: ::Extrinsic, + block_hash: ::Hash, + ) -> TransactionValidity { + Executive::validate_transaction(source, tx, block_hash) + } + } + + impl sp_offchain::OffchainWorkerApi for Runtime { + fn offchain_worker(header: &::Header) { + Executive::offchain_worker(header) + } + } + + impl sp_session::SessionKeys for Runtime { + fn generate_session_keys(seed: Option>) -> Vec { + SessionKeys::generate(seed) + } + + fn decode_session_keys( + encoded: Vec, + ) -> Option, KeyTypeId)>> { + SessionKeys::decode_into_raw_public_keys(&encoded) + } + } + + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { + fn account_nonce(account: AccountId) -> Nonce { + System::account_nonce(account) + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi for Runtime { + fn query_info( + uxt: ::Extrinsic, + len: u32, + ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo { + TransactionPayment::query_info(uxt, len) + } + fn query_fee_details( + uxt: ::Extrinsic, + len: u32, + ) -> pallet_transaction_payment::FeeDetails { + TransactionPayment::query_fee_details(uxt, len) + } + fn query_weight_to_fee(weight: Weight) -> Balance { + TransactionPayment::weight_to_fee(weight) + } + fn query_length_to_fee(length: u32) -> Balance { + TransactionPayment::length_to_fee(length) + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi + for Runtime + { + fn query_call_info( + call: RuntimeCall, + len: u32, + ) -> pallet_transaction_payment::RuntimeDispatchInfo { + TransactionPayment::query_call_info(call, len) + } + fn query_call_fee_details( + call: RuntimeCall, + len: u32, + ) -> pallet_transaction_payment::FeeDetails { + TransactionPayment::query_call_fee_details(call, len) + } + fn query_weight_to_fee(weight: Weight) -> Balance { + TransactionPayment::weight_to_fee(weight) + } + fn query_length_to_fee(length: u32) -> Balance { + TransactionPayment::length_to_fee(length) + } + } + + impl cumulus_primitives_core::CollectCollationInfo for Runtime { + fn collect_collation_info(header: &::Header) -> 
cumulus_primitives_core::CollationInfo { + ParachainSystem::collect_collation_info(header) + } + } + + #[cfg(feature = "try-runtime")] + impl frame_try_runtime::TryRuntime for Runtime { + fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { + let weight = Executive::try_runtime_upgrade(checks).unwrap(); + (weight, RuntimeBlockWeights::get().max_block) + } + + fn execute_block( + block: Block, + state_root_check: bool, + signature_check: bool, + select: frame_try_runtime::TryStateSelect, + ) -> Weight { + // NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to + // have a backtrace here. + Executive::try_execute_block(block, state_root_check, signature_check, select).unwrap() + } + } + + #[cfg(feature = "runtime-benchmarks")] + impl frame_benchmarking::Benchmark for Runtime { + fn benchmark_metadata(extra: bool) -> ( + Vec, + Vec, + ) { + use frame_benchmarking::{Benchmarking, BenchmarkList}; + use frame_support::traits::StorageInfoTrait; + use frame_system_benchmarking::Pallet as SystemBench; + use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; + + let mut list = Vec::::new(); + list_benchmarks!(list, extra); + + let storage_info = AllPalletsWithSystem::storage_info(); + (list, storage_info) + } + + fn dispatch_benchmark( + config: frame_benchmarking::BenchmarkConfig + ) -> Result, sp_runtime::RuntimeString> { + use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; + use sp_storage::TrackedStorageKey; + + use frame_system_benchmarking::Pallet as SystemBench; + impl frame_system_benchmarking::Config for Runtime { + fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { + ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); + Ok(()) + } + + fn verify_set_code() { + System::assert_last_event(cumulus_pallet_parachain_system::Event::::ValidationFunctionStored.into()); + } + } + + use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + impl cumulus_pallet_session_benchmarking::Config for Runtime {} + + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between Collectives and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }.into(), + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Reserve transfers are disabled on Collectives. 
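+ // Hence no (asset, destination) pair is supplied for the reserve-transfer benchmark.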
+ None + } + } + + let whitelist: Vec = vec![ + // Block Number + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), + // Total Issuance + hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), + // Execution Phase + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), + // Event Count + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), + // System Events + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), + ]; + + let mut batches = Vec::::new(); + let params = (&config, &whitelist); + add_benchmarks!(params, batches); + + if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) } + Ok(batches) + } + } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + create_default_config::() + } + + fn build_config(config: Vec) -> sp_genesis_builder::Result { + build_config::(config) + } + } +} + +cumulus_pallet_parachain_system::register_validate_block! { + Runtime = Runtime, + BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/block_weights.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/block_weights.rs new file mode 100644 index 00000000000..e7fdb2aae2a --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/block_weights.rs @@ -0,0 +1,53 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod constants { + use frame_support::{ + parameter_types, + weights::{constants, Weight}, + }; + + parameter_types! { + /// Importing a block with 0 Extrinsics. + pub const BlockExecutionWeight: Weight = + Weight::from_parts(constants::WEIGHT_REF_TIME_PER_NANOS.saturating_mul(5_000_000), 0); + } + + #[cfg(test)] + mod test_weights { + use frame_support::weights::constants; + + /// Checks that the weight exists and is sane. + // NOTE: If this test fails but you are sure that the generated values are fine, + // you can delete it. + #[test] + fn sane() { + let w = super::constants::BlockExecutionWeight::get(); + + // At least 100 µs. + assert!( + w.ref_time() >= 100u64 * constants::WEIGHT_REF_TIME_PER_MICROS, + "Weight should be at least 100 µs." + ); + // At most 50 ms. + assert!( + w.ref_time() <= 50u64 * constants::WEIGHT_REF_TIME_PER_MILLIS, + "Weight should be at most 50 ms." 
+ ); + } + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_dmp_queue.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_dmp_queue.rs new file mode 100644 index 00000000000..cc41dcd6cbb --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_dmp_queue.rs @@ -0,0 +1,131 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `cumulus_pallet_dmp_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=cumulus_pallet_dmp_queue +// --chain=asset-hub-kusama-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_dmp_queue`. 
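+/// These benchmarks cover the lazy `on_idle` migration of stored DMP messages into `pallet_message_queue`.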
+pub struct WeightInfo(PhantomData); +impl cumulus_pallet_dmp_queue::WeightInfo for WeightInfo { + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65696` + // Estimated: `69161` + // Minimum execution time: 124_651_000 picoseconds. + Weight::from_parts(127_857_000, 0) + .saturating_add(Weight::from_parts(0, 69161)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + fn on_idle_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65659` + // Estimated: `69124` + // Minimum execution time: 65_684_000 picoseconds. 
+ Weight::from_parts(68_039_000, 0) + .saturating_add(Weight::from_parts(0, 69124)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_overweight_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65726` + // Estimated: `69191` + // Minimum execution time: 117_657_000 picoseconds. + Weight::from_parts(122_035_000, 0) + .saturating_add(Weight::from_parts(0, 69191)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + fn on_idle_overweight_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65689` + // Estimated: `69154` + // Minimum execution time: 59_799_000 picoseconds. + Weight::from_parts(61_354_000, 0) + .saturating_add(Weight::from_parts(0, 69154)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_parachain_system.rs new file mode 100644 index 00000000000..0b7a2fc21cd --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_parachain_system.rs @@ -0,0 +1,80 @@ +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `cumulus_pallet_parachain_system` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-03-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("collectives-polkadot-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --chain +// collectives-polkadot-dev +// --pallet +// cumulus_pallet_parachain_system +// --extrinsic +// * +// --execution +// wasm +// --wasm-execution +// compiled +// --output +// parachains/runtimes/collectives/collectives-polkadot/src/weights +// --steps +// 50 +// --repeat +// 20 + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use sp_std::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_parachain_system`. +pub struct WeightInfo(PhantomData); +impl cumulus_pallet_parachain_system::WeightInfo for WeightInfo { + /// Storage: ParachainSystem LastDmqMqcHead (r:1 w:1) + /// Proof Skipped: ParachainSystem LastDmqMqcHead (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: ParachainSystem ReservedDmpWeightOverride (r:1 w:0) + /// Proof Skipped: ParachainSystem ReservedDmpWeightOverride (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: ParachainSystem ProcessedDownwardMessages (r:0 w:1) + /// Proof Skipped: ParachainSystem ProcessedDownwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: MessageQueue Pages (r:0 w:16) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// The range of component `n` is `[0, 1000]`. + fn enqueue_inbound_downward_messages(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `48` + // Estimated: `8121` + // Minimum execution time: 1_988_000 picoseconds. 
+ Weight::from_parts(2_039_000, 0) + .saturating_add(Weight::from_parts(0, 8121)) + // Standard Error: 30_660 + .saturating_add(Weight::from_parts(24_419_204, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_xcmp_queue.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_xcmp_queue.rs new file mode 100644 index 00000000000..e68c075bffc --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_xcmp_queue.rs @@ -0,0 +1,148 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `cumulus_pallet_xcmp_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-09-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `Olivers-MacBook-Pro.local`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --pallet +// cumulus-pallet-xcmp-queue +// --chain +// collectives-polkadot-dev +// --output +// cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs +// --extrinsic +// + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_xcmp_queue`. +pub struct WeightInfo(PhantomData); +impl cumulus_pallet_xcmp_queue::WeightInfo for WeightInfo { + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:1) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_config_with_u32() -> Weight { + // Proof Size summary in bytes: + // Measured: `142` + // Estimated: `1627` + // Minimum execution time: 5_000_000 picoseconds. 
+ Weight::from_parts(6_000_000, 0) + .saturating_add(Weight::from_parts(0, 1627)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn enqueue_xcmp_message() -> Weight { + // Proof Size summary in bytes: + // Measured: `148` + // Estimated: `3517` + // Minimum execution time: 14_000_000 picoseconds. + Weight::from_parts(14_000_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn suspend_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `142` + // Estimated: `1627` + // Minimum execution time: 3_000_000 picoseconds. + Weight::from_parts(3_000_000, 0) + .saturating_add(Weight::from_parts(0, 1627)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn resume_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `177` + // Estimated: `1662` + // Minimum execution time: 4_000_000 picoseconds. + Weight::from_parts(5_000_000, 0) + .saturating_add(Weight::from_parts(0, 1662)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn take_first_concatenated_xcm() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 44_000_000 picoseconds. 
+ Weight::from_parts(45_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Storage: `XcmpQueue::InboundXcmpMessages` (r:1 w:1) + /// Proof: `XcmpQueue::InboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65777` + // Estimated: `69242` + // Minimum execution time: 60_000_000 picoseconds. + Weight::from_parts(63_000_000, 0) + .saturating_add(Weight::from_parts(0, 69242)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + fn on_idle_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65776` + // Estimated: `69241` + // Minimum execution time: 41_000_000 picoseconds. + Weight::from_parts(43_000_000, 0) + .saturating_add(Weight::from_parts(0, 69241)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/extrinsic_weights.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/extrinsic_weights.rs new file mode 100644 index 00000000000..1a4adb968bb --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/extrinsic_weights.rs @@ -0,0 +1,53 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod constants { + use frame_support::{ + parameter_types, + weights::{constants, Weight}, + }; + + parameter_types! { + /// Executing a NO-OP `System::remarks` Extrinsic. 
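+ /// This base weight is charged for every extrinsic on top of its own dispatch weight.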
+ pub const ExtrinsicBaseWeight: Weight = + Weight::from_parts(constants::WEIGHT_REF_TIME_PER_NANOS.saturating_mul(125_000), 0); + } + + #[cfg(test)] + mod test_weights { + use frame_support::weights::constants; + + /// Checks that the weight exists and is sane. + // NOTE: If this test fails but you are sure that the generated values are fine, + // you can delete it. + #[test] + fn sane() { + let w = super::constants::ExtrinsicBaseWeight::get(); + + // At least 10 µs. + assert!( + w.ref_time() >= 10u64 * constants::WEIGHT_REF_TIME_PER_MICROS, + "Weight should be at least 10 µs." + ); + // At most 1 ms. + assert!( + w.ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Weight should be at most 1 ms." + ); + } + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system.rs new file mode 100644 index 00000000000..b6f1dc8dc08 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system.rs @@ -0,0 +1,154 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `frame_system` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=frame_system +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system`. +pub struct WeightInfo(PhantomData); +impl frame_system::WeightInfo for WeightInfo { + /// The range of component `b` is `[0, 3932160]`. + fn remark(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_926_000 picoseconds. + Weight::from_parts(1_929_666, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 0 + .saturating_add(Weight::from_parts(387, 0).saturating_mul(b.into())) + } + /// The range of component `b` is `[0, 3932160]`. 
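+ /// `b` is the byte length of the remark payload.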
+ fn remark_with_event(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_221_000 picoseconds. + Weight::from_parts(34_449_539, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 7 + .saturating_add(Weight::from_parts(1_706, 0).saturating_mul(b.into())) + } + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) + fn set_heap_pages() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `1485` + // Minimum execution time: 3_681_000 picoseconds. + Weight::from_parts(3_857_000, 0) + .saturating_add(Weight::from_parts(0, 1485)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) + /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpgradeRestrictionSignal` (r:1 w:0) + /// Proof: `ParachainSystem::UpgradeRestrictionSignal` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingValidationCode` (r:1 w:1) + /// Proof: `ParachainSystem::PendingValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::NewValidationCode` (r:0 w:1) + /// Proof: `ParachainSystem::NewValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::DidSetValidationCode` (r:0 w:1) + /// Proof: `ParachainSystem::DidSetValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_code() -> Weight { + // Proof Size summary in bytes: + // Measured: `156` + // Estimated: `1641` + // Minimum execution time: 101_899_621_000 picoseconds. + Weight::from_parts(106_377_672_000, 0) + .saturating_add(Weight::from_parts(0, 1641)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `i` is `[0, 1000]`. + fn set_storage(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_039_000 picoseconds. + Weight::from_parts(2_094_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 2_124 + .saturating_add(Weight::from_parts(754_465, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `i` is `[0, 1000]`. + fn kill_storage(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_103_000 picoseconds. 
+ Weight::from_parts(2_182_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 1_031 + .saturating_add(Weight::from_parts(570_563, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `p` is `[0, 1000]`. + fn kill_prefix(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `82 + p * (69 ±0)` + // Estimated: `78 + p * (70 ±0)` + // Minimum execution time: 3_728_000 picoseconds. + Weight::from_parts(3_836_000, 0) + .saturating_add(Weight::from_parts(0, 78)) + // Standard Error: 1_802 + .saturating_add(Weight::from_parts(1_199_345, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) + .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs new file mode 100644 index 00000000000..1d877fdbd2b --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs @@ -0,0 +1,50 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
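+//! Weight modules for the collectives-westend runtime; the block, extrinsic and database weight constants are re-exported for the rest of the runtime.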
+ +pub mod block_weights; +pub mod cumulus_pallet_dmp_queue; +pub mod cumulus_pallet_parachain_system; +pub mod cumulus_pallet_xcmp_queue; +pub mod extrinsic_weights; +pub mod frame_system; +pub mod pallet_alliance; +pub mod pallet_balances; +pub mod pallet_collator_selection; +pub mod pallet_collective; +pub mod pallet_collective_content; +pub mod pallet_core_fellowship_ambassador_core; +pub mod pallet_core_fellowship_fellowship_core; +pub mod pallet_message_queue; +pub mod pallet_multisig; +pub mod pallet_preimage; +pub mod pallet_proxy; +pub mod pallet_ranked_collective_ambassador_collective; +pub mod pallet_ranked_collective_fellowship_collective; +pub mod pallet_referenda_ambassador_referenda; +pub mod pallet_referenda_fellowship_referenda; +pub mod pallet_salary_ambassador_salary; +pub mod pallet_salary_fellowship_salary; +pub mod pallet_scheduler; +pub mod pallet_session; +pub mod pallet_timestamp; +pub mod pallet_utility; +pub mod pallet_xcm; +pub mod paritydb_weights; +pub mod rocksdb_weights; + +pub use block_weights::constants::BlockExecutionWeight; +pub use extrinsic_weights::constants::ExtrinsicBaseWeight; +pub use paritydb_weights::constants::ParityDbWeight; +pub use rocksdb_weights::constants::RocksDbWeight; diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_alliance.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_alliance.rs new file mode 100644 index 00000000000..d8ede609a67 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_alliance.rs @@ -0,0 +1,494 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_alliance` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_alliance +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_alliance`. 
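+/// Benchmarked with the `collectives-polkadot-dev` chain spec noted in the command header above.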
+pub struct WeightInfo(PhantomData); +impl pallet_alliance::WeightInfo for WeightInfo { + /// Storage: `Alliance::Members` (r:1 w:0) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::ProposalOf` (r:1 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalCount` (r:1 w:1) + /// Proof: `AllianceMotion::ProposalCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Voting` (r:0 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `b` is `[1, 1024]`. + /// The range of component `m` is `[2, 100]`. + /// The range of component `p` is `[1, 100]`. + fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `439 + m * (32 ±0) + p * (36 ±0)` + // Estimated: `6676 + m * (32 ±0) + p * (36 ±0)` + // Minimum execution time: 32_783_000 picoseconds. + Weight::from_parts(32_174_037, 0) + .saturating_add(Weight::from_parts(0, 6676)) + // Standard Error: 198 + .saturating_add(Weight::from_parts(1_220, 0).saturating_mul(b.into())) + // Standard Error: 2_074 + .saturating_add(Weight::from_parts(40_945, 0).saturating_mul(m.into())) + // Standard Error: 2_048 + .saturating_add(Weight::from_parts(181_087, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) + } + /// Storage: `Alliance::Members` (r:1 w:0) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `m` is `[5, 100]`. + fn vote(m: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `868 + m * (64 ±0)` + // Estimated: `6676 + m * (64 ±0)` + // Minimum execution time: 28_520_000 picoseconds. 
+ Weight::from_parts(29_661_024, 0) + .saturating_add(Weight::from_parts(0, 6676)) + // Standard Error: 2_336 + .saturating_add(Weight::from_parts(89_873, 0).saturating_mul(m.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) + } + /// Storage: `Alliance::Members` (r:1 w:0) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:0 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `m` is `[4, 100]`. + /// The range of component `p` is `[1, 100]`. + fn close_early_disapproved(m: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `312 + m * (96 ±0) + p * (36 ±0)` + // Estimated: `6676 + m * (97 ±0) + p * (36 ±0)` + // Minimum execution time: 39_353_000 picoseconds. + Weight::from_parts(33_028_008, 0) + .saturating_add(Weight::from_parts(0, 6676)) + // Standard Error: 2_137 + .saturating_add(Weight::from_parts(90_946, 0).saturating_mul(m.into())) + // Standard Error: 2_084 + .saturating_add(Weight::from_parts(175_827, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) + } + /// Storage: `Alliance::Members` (r:1 w:0) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:1 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// The range of component `b` is `[1, 1024]`. + /// The range of component `m` is `[4, 100]`. + /// The range of component `p` is `[1, 100]`. + fn close_early_approved(_b: u32, m: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `762 + m * (96 ±0) + p * (41 ±0)` + // Estimated: `6676 + m * (97 ±0) + p * (40 ±0)` + // Minimum execution time: 52_835_000 picoseconds. 
+ Weight::from_parts(45_963_292, 0) + .saturating_add(Weight::from_parts(0, 6676)) + // Standard Error: 3_189 + .saturating_add(Weight::from_parts(111_627, 0).saturating_mul(m.into())) + // Standard Error: 3_109 + .saturating_add(Weight::from_parts(207_923, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) + } + /// Storage: `Alliance::Members` (r:1 w:0) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:1 w:0) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:1 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Alliance::Rule` (r:0 w:1) + /// Proof: `Alliance::Rule` (`max_values`: Some(1), `max_size`: Some(87), added: 582, mode: `MaxEncodedLen`) + /// The range of component `m` is `[2, 100]`. + /// The range of component `p` is `[1, 100]`. + fn close_disapproved(m: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `518 + m * (96 ±0) + p * (41 ±0)` + // Estimated: `6676 + m * (109 ±0) + p * (43 ±0)` + // Minimum execution time: 49_980_000 picoseconds. + Weight::from_parts(48_110_301, 0) + .saturating_add(Weight::from_parts(0, 6676)) + // Standard Error: 5_057 + .saturating_add(Weight::from_parts(169_065, 0).saturating_mul(m.into())) + // Standard Error: 4_995 + .saturating_add(Weight::from_parts(201_349, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(Weight::from_parts(0, 109).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 43).saturating_mul(p.into())) + } + /// Storage: `Alliance::Members` (r:1 w:0) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:1 w:0) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:0 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `b` is `[1, 1024]`. + /// The range of component `m` is `[5, 100]`. + /// The range of component `p` is `[1, 100]`. 
+ fn close_approved(_b: u32, m: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `417 + m * (96 ±0) + p * (36 ±0)` + // Estimated: `6676 + m * (96 ±0) + p * (36 ±0)` + // Minimum execution time: 40_646_000 picoseconds. + Weight::from_parts(36_865_909, 0) + .saturating_add(Weight::from_parts(0, 6676)) + // Standard Error: 2_136 + .saturating_add(Weight::from_parts(74_341, 0).saturating_mul(m.into())) + // Standard Error: 2_059 + .saturating_add(Weight::from_parts(170_035, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 96).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) + } + /// Storage: `Alliance::Members` (r:2 w:2) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Members` (r:1 w:1) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// The range of component `m` is `[1, 100]`. + /// The range of component `z` is `[0, 100]`. + fn init_members(m: u32, z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `12` + // Estimated: `12362` + // Minimum execution time: 29_710_000 picoseconds. + Weight::from_parts(17_762_170, 0) + .saturating_add(Weight::from_parts(0, 12362)) + // Standard Error: 1_652 + .saturating_add(Weight::from_parts(156_967, 0).saturating_mul(m.into())) + // Standard Error: 1_632 + .saturating_add(Weight::from_parts(130_352, 0).saturating_mul(z.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Alliance::Members` (r:2 w:2) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:0) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Alliance::DepositOf` (r:200 w:50) + /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:50 w:50) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Members` (r:0 w:1) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:0 w:1) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// The range of component `x` is `[1, 100]`. + /// The range of component `y` is `[0, 100]`. + /// The range of component `z` is `[0, 50]`. + fn disband(x: u32, y: u32, z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + x * (52 ±0) + y * (53 ±0) + z * (250 ±0)` + // Estimated: `12362 + x * (2539 ±0) + y * (2539 ±0) + z * (2603 ±1)` + // Minimum execution time: 294_258_000 picoseconds. 
+ Weight::from_parts(295_116_000, 0) + .saturating_add(Weight::from_parts(0, 12362)) + // Standard Error: 23_663 + .saturating_add(Weight::from_parts(553_978, 0).saturating_mul(x.into())) + // Standard Error: 23_549 + .saturating_add(Weight::from_parts(567_024, 0).saturating_mul(y.into())) + // Standard Error: 47_055 + .saturating_add(Weight::from_parts(15_439_056, 0).saturating_mul(z.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(x.into()))) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(y.into()))) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(z.into()))) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(z.into()))) + .saturating_add(Weight::from_parts(0, 2539).saturating_mul(x.into())) + .saturating_add(Weight::from_parts(0, 2539).saturating_mul(y.into())) + .saturating_add(Weight::from_parts(0, 2603).saturating_mul(z.into())) + } + /// Storage: `Alliance::Rule` (r:0 w:1) + /// Proof: `Alliance::Rule` (`max_values`: Some(1), `max_size`: Some(87), added: 582, mode: `MaxEncodedLen`) + fn set_rule() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_538_000 picoseconds. + Weight::from_parts(8_752_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Alliance::Announcements` (r:1 w:1) + /// Proof: `Alliance::Announcements` (`max_values`: Some(1), `max_size`: Some(8702), added: 9197, mode: `MaxEncodedLen`) + fn announce() -> Weight { + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `10187` + // Minimum execution time: 11_213_000 picoseconds. + Weight::from_parts(11_792_000, 0) + .saturating_add(Weight::from_parts(0, 10187)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Alliance::Announcements` (r:1 w:1) + /// Proof: `Alliance::Announcements` (`max_values`: Some(1), `max_size`: Some(8702), added: 9197, mode: `MaxEncodedLen`) + fn remove_announcement() -> Weight { + // Proof Size summary in bytes: + // Measured: `149` + // Estimated: `10187` + // Minimum execution time: 12_477_000 picoseconds. + Weight::from_parts(12_942_000, 0) + .saturating_add(Weight::from_parts(0, 10187)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Alliance::Members` (r:3 w:1) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `Alliance::UnscrupulousAccounts` (r:1 w:0) + /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Alliance::DepositOf` (r:0 w:1) + /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) + fn join_alliance() -> Weight { + // Proof Size summary in bytes: + // Measured: `294` + // Estimated: `18048` + // Minimum execution time: 41_517_000 picoseconds. 
+ Weight::from_parts(42_433_000, 0) + .saturating_add(Weight::from_parts(0, 18048)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Alliance::Members` (r:3 w:1) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `Alliance::UnscrupulousAccounts` (r:1 w:0) + /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + fn nominate_ally() -> Weight { + // Proof Size summary in bytes: + // Measured: `193` + // Estimated: `18048` + // Minimum execution time: 25_950_000 picoseconds. + Weight::from_parts(26_631_000, 0) + .saturating_add(Weight::from_parts(0, 18048)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Alliance::Members` (r:2 w:2) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:0) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:0 w:1) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:0 w:1) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn elevate_ally() -> Weight { + // Proof Size summary in bytes: + // Measured: `236` + // Estimated: `12362` + // Minimum execution time: 24_470_000 picoseconds. + Weight::from_parts(25_222_000, 0) + .saturating_add(Weight::from_parts(0, 12362)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Alliance::Members` (r:4 w:2) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:0) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:0 w:1) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:0 w:1) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Alliance::RetiringMembers` (r:0 w:1) + /// Proof: `Alliance::RetiringMembers` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn give_retirement_notice() -> Weight { + // Proof Size summary in bytes: + // Measured: `236` + // Estimated: `23734` + // Minimum execution time: 31_519_000 picoseconds. 
+ Weight::from_parts(32_827_000, 0) + .saturating_add(Weight::from_parts(0, 23734)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `Alliance::RetiringMembers` (r:1 w:1) + /// Proof: `Alliance::RetiringMembers` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Alliance::Members` (r:1 w:1) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `Alliance::DepositOf` (r:1 w:1) + /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn retire() -> Weight { + // Proof Size summary in bytes: + // Measured: `517` + // Estimated: `6676` + // Minimum execution time: 38_799_000 picoseconds. + Weight::from_parts(39_634_000, 0) + .saturating_add(Weight::from_parts(0, 6676)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Alliance::Members` (r:3 w:1) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:0) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Alliance::DepositOf` (r:1 w:1) + /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:0 w:1) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:0 w:1) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn kick_member() -> Weight { + // Proof Size summary in bytes: + // Measured: `643` + // Estimated: `18048` + // Minimum execution time: 137_442_000 picoseconds. 
+ Weight::from_parts(142_142_000, 0) + .saturating_add(Weight::from_parts(0, 18048)) + .saturating_add(T::DbWeight::get().reads(13)) + .saturating_add(T::DbWeight::get().writes(8)) + } + /// Storage: `Alliance::UnscrupulousAccounts` (r:1 w:1) + /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `Alliance::UnscrupulousWebsites` (r:1 w:1) + /// Proof: `Alliance::UnscrupulousWebsites` (`max_values`: Some(1), `max_size`: Some(25702), added: 26197, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 100]`. + /// The range of component `l` is `[0, 255]`. + fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `27187` + // Minimum execution time: 7_189_000 picoseconds. + Weight::from_parts(7_387_000, 0) + .saturating_add(Weight::from_parts(0, 27187)) + // Standard Error: 3_417 + .saturating_add(Weight::from_parts(1_581_413, 0).saturating_mul(n.into())) + // Standard Error: 1_338 + .saturating_add(Weight::from_parts(67_739, 0).saturating_mul(l.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Alliance::UnscrupulousAccounts` (r:1 w:1) + /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `Alliance::UnscrupulousWebsites` (r:1 w:1) + /// Proof: `Alliance::UnscrupulousWebsites` (`max_values`: Some(1), `max_size`: Some(25702), added: 26197, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 100]`. + /// The range of component `l` is `[0, 255]`. + fn remove_unscrupulous_items(n: u32, l: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + l * (100 ±0) + n * (289 ±0)` + // Estimated: `27187` + // Minimum execution time: 7_201_000 picoseconds. + Weight::from_parts(7_325_000, 0) + .saturating_add(Weight::from_parts(0, 27187)) + // Standard Error: 183_302 + .saturating_add(Weight::from_parts(16_886_382, 0).saturating_mul(n.into())) + // Standard Error: 71_789 + .saturating_add(Weight::from_parts(352_937, 0).saturating_mul(l.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Alliance::Members` (r:3 w:2) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:0) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:0 w:1) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:0 w:1) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn abdicate_fellow_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `236` + // Estimated: `18048` + // Minimum execution time: 29_653_000 picoseconds. 
+ Weight::from_parts(30_365_000, 0) + .saturating_add(Weight::from_parts(0, 18048)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_balances.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_balances.rs new file mode 100644 index 00000000000..6c1cf072257 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_balances.rs @@ -0,0 +1,152 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_balances` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_balances +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_balances`. +pub struct WeightInfo(PhantomData); +impl pallet_balances::WeightInfo for WeightInfo { + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer_allow_death() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 55_696_000 picoseconds. + Weight::from_parts(56_582_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer_keep_alive() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 40_885_000 picoseconds. 
+ Weight::from_parts(41_993_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_set_balance_creating() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `3593` + // Minimum execution time: 14_565_000 picoseconds. + Weight::from_parts(15_080_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_set_balance_killing() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `3593` + // Minimum execution time: 22_158_000 picoseconds. + Weight::from_parts(22_715_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `6196` + // Minimum execution time: 57_957_000 picoseconds. + Weight::from_parts(58_618_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer_all() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 52_018_000 picoseconds. + Weight::from_parts(52_795_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_unreserve() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `3593` + // Minimum execution time: 17_469_000 picoseconds. + Weight::from_parts(18_030_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:999 w:999) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `u` is `[1, 1000]`. + fn upgrade_accounts(u: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + u * (136 ±0)` + // Estimated: `990 + u * (2603 ±0)` + // Minimum execution time: 17_223_000 picoseconds. 
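For readers less familiar with the two-dimensional `Weight` type these functions return, here is a minimal sketch (assuming `frame_support` as a dependency; the function name is invented, the constants are copied from `transfer_keep_alive` above):

```rust
// Sketch: `Weight` carries two independent dimensions, ref_time (picoseconds)
// and proof_size (bytes). `from_parts(ref_time, proof_size)` builds one, and
// `saturating_add` sums the dimensions separately, which is why the generated
// code adds the base execution time and the `Estimated` PoV size as two parts.
use frame_support::weights::Weight;

fn transfer_keep_alive_like() -> Weight {
    Weight::from_parts(41_993_000, 0)                // time component only
        .saturating_add(Weight::from_parts(0, 3593)) // proof-size component only
}

fn main() {
    let w = transfer_keep_alive_like();
    assert_eq!(w.ref_time(), 41_993_000);
    assert_eq!(w.proof_size(), 3593);
    println!("{w:?}");
}
```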
+ Weight::from_parts(17_587_000, 0) + .saturating_add(Weight::from_parts(0, 990)) + // Standard Error: 16_201 + .saturating_add(Weight::from_parts(15_360_967, 0).saturating_mul(u.into())) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(u.into()))) + .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collator_selection.rs new file mode 100644 index 00000000000..03f3ff602a5 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collator_selection.rs @@ -0,0 +1,246 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_collator_selection` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_collator_selection +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_collator_selection`. +pub struct WeightInfo(PhantomData); +impl pallet_collator_selection::WeightInfo for WeightInfo { + /// Storage: `Session::NextKeys` (r:20 w:0) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `CollatorSelection::Invulnerables` (r:0 w:1) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// The range of component `b` is `[1, 20]`. + fn set_invulnerables(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `163 + b * (79 ±0)` + // Estimated: `1154 + b * (2555 ±0)` + // Minimum execution time: 14_616_000 picoseconds. 
+ Weight::from_parts(12_150_410, 0) + .saturating_add(Weight::from_parts(0, 1154)) + // Standard Error: 6_270 + .saturating_add(Weight::from_parts(3_256_932, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(Weight::from_parts(0, 2555).saturating_mul(b.into())) + } + /// Storage: `Session::NextKeys` (r:1 w:0) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Candidates` (r:1 w:1) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `b` is `[1, 19]`. + /// The range of component `c` is `[1, 99]`. + fn add_invulnerable(b: u32, c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `756 + b * (32 ±0) + c * (53 ±0)` + // Estimated: `6287 + b * (37 ±0) + c * (53 ±0)` + // Minimum execution time: 48_450_000 picoseconds. + Weight::from_parts(51_166_679, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_588 + .saturating_add(Weight::from_parts(167_219, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 37).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(0, 53).saturating_mul(c.into())) + } + /// Storage: `CollatorSelection::Candidates` (r:1 w:0) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// The range of component `b` is `[5, 20]`. + fn remove_invulnerable(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `119 + b * (32 ±0)` + // Estimated: `6287` + // Minimum execution time: 15_830_000 picoseconds. + Weight::from_parts(15_792_847, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 5_343 + .saturating_add(Weight::from_parts(167_955, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `CollatorSelection::DesiredCandidates` (r:0 w:1) + /// Proof: `CollatorSelection::DesiredCandidates` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn set_desired_candidates() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_424_000 picoseconds. 
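A short sketch of what the `T::DbWeight::get().reads(..)` and `.writes(..)` calls in these functions resolve to (assuming `frame_support`; the 25 µs / 100 µs figures are illustrative, not read from this runtime's chain spec):

```rust
// Sketch: `RuntimeDbWeight` holds the per-runtime cost of a single storage
// read or write, and `reads(n)` / `writes(n)` turn those into `Weight`s.
use frame_support::weights::{RuntimeDbWeight, Weight};

fn main() {
    let db = RuntimeDbWeight { read: 25_000_000, write: 100_000_000 };
    // e.g. `remove_invulnerable` above charges 2 reads and 1 write on top of
    // its measured base cost:
    let overhead: Weight = db.reads(2).saturating_add(db.writes(1));
    println!("DB overhead: {} ps of ref_time", overhead.ref_time());
}
```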
+ Weight::from_parts(7_767_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) + /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_388_000 picoseconds. + Weight::from_parts(7_677_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `CollatorSelection::Candidates` (r:1 w:1) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::DesiredCandidates` (r:1 w:0) + /// Proof: `CollatorSelection::DesiredCandidates` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// Storage: `Session::NextKeys` (r:1 w:0) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `CollatorSelection::CandidacyBond` (r:1 w:0) + /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// The range of component `c` is `[1, 99]`. + fn register_as_candidate(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `736 + c * (52 ±0)` + // Estimated: `6287 + c * (54 ±0)` + // Minimum execution time: 41_241_000 picoseconds. + Weight::from_parts(46_090_319, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_918 + .saturating_add(Weight::from_parts(161_140, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(Weight::from_parts(0, 54).saturating_mul(c.into())) + } + /// Storage: `CollatorSelection::Candidates` (r:1 w:1) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// The range of component `c` is `[4, 100]`. + fn leave_intent(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_221_000 picoseconds. 
+ Weight::from_parts(36_183_872, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_766 + .saturating_add(Weight::from_parts(168_742, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn note_author() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `6196` + // Minimum execution time: 43_910_000 picoseconds. + Weight::from_parts(44_796_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `CollatorSelection::Candidates` (r:1 w:0) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:100 w:0) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:97 w:97) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `r` is `[1, 100]`. + /// The range of component `c` is `[1, 100]`. + fn new_session(r: u32, c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `2243 + c * (97 ±0) + r * (112 ±0)` + // Estimated: `6287 + c * (2519 ±0) + r * (2603 ±0)` + // Minimum execution time: 17_092_000 picoseconds. 
+ Weight::from_parts(17_635_000, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 351_635 + .saturating_add(Weight::from_parts(15_162_192, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(c.into()))) + .saturating_add(Weight::from_parts(0, 2519).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(0, 2603).saturating_mul(r.into())) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collective.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collective.rs new file mode 100644 index 00000000000..9133baa6120 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collective.rs @@ -0,0 +1,304 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_collective` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_collective +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_collective`. +pub struct WeightInfo(PhantomData); +impl pallet_collective::WeightInfo for WeightInfo { + /// Storage: `AllianceMotion::Members` (r:1 w:1) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:0) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Voting` (r:100 w:100) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:0 w:1) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// The range of component `m` is `[0, 100]`. 
+ /// The range of component `n` is `[0, 100]`. + /// The range of component `p` is `[0, 100]`. + fn set_members(m: u32, _n: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + m * (3232 ±0) + p * (3190 ±0)` + // Estimated: `15691 + m * (1967 ±23) + p * (4332 ±23)` + // Minimum execution time: 16_410_000 picoseconds. + Weight::from_parts(16_816_000, 0) + .saturating_add(Weight::from_parts(0, 15691)) + // Standard Error: 59_812 + .saturating_add(Weight::from_parts(4_516_537, 0).saturating_mul(m.into())) + // Standard Error: 59_812 + .saturating_add(Weight::from_parts(7_992_168, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) + .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) + .saturating_add(Weight::from_parts(0, 1967).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 4332).saturating_mul(p.into())) + } + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// The range of component `b` is `[2, 1024]`. + /// The range of component `m` is `[1, 100]`. + fn execute(b: u32, m: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `32 + m * (32 ±0)` + // Estimated: `1518 + m * (32 ±0)` + // Minimum execution time: 14_418_000 picoseconds. + Weight::from_parts(13_588_617, 0) + .saturating_add(Weight::from_parts(0, 1518)) + // Standard Error: 21 + .saturating_add(Weight::from_parts(1_711, 0).saturating_mul(b.into())) + // Standard Error: 223 + .saturating_add(Weight::from_parts(13_836, 0).saturating_mul(m.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) + } + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:1 w:0) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `b` is `[2, 1024]`. + /// The range of component `m` is `[1, 100]`. + fn propose_execute(b: u32, m: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `32 + m * (32 ±0)` + // Estimated: `3498 + m * (32 ±0)` + // Minimum execution time: 17_174_000 picoseconds. 
+ Weight::from_parts(16_192_764, 0) + .saturating_add(Weight::from_parts(0, 3498)) + // Standard Error: 27 + .saturating_add(Weight::from_parts(1_672, 0).saturating_mul(b.into())) + // Standard Error: 280 + .saturating_add(Weight::from_parts(24_343, 0).saturating_mul(m.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) + } + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:1 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalCount` (r:1 w:1) + /// Proof: `AllianceMotion::ProposalCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Voting` (r:0 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `b` is `[2, 1024]`. + /// The range of component `m` is `[2, 100]`. + /// The range of component `p` is `[1, 100]`. + fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `322 + m * (32 ±0) + p * (36 ±0)` + // Estimated: `3714 + m * (33 ±0) + p * (36 ±0)` + // Minimum execution time: 23_970_000 picoseconds. + Weight::from_parts(23_004_052, 0) + .saturating_add(Weight::from_parts(0, 3714)) + // Standard Error: 123 + .saturating_add(Weight::from_parts(2_728, 0).saturating_mul(b.into())) + // Standard Error: 1_291 + .saturating_add(Weight::from_parts(32_731, 0).saturating_mul(m.into())) + // Standard Error: 1_275 + .saturating_add(Weight::from_parts(199_537, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(Weight::from_parts(0, 33).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) + } + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `m` is `[5, 100]`. + fn vote(m: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `771 + m * (64 ±0)` + // Estimated: `4235 + m * (64 ±0)` + // Minimum execution time: 25_843_000 picoseconds. 
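The proof-size dimension scales with the benchmark components in the same way; a small sketch (again assuming `frame_support`; constants copied from `propose_proposed` above, helper name invented):

```rust
// Sketch: the PoV estimate of `propose_proposed` is 3714 bytes plus 33 bytes
// per member and 36 bytes per queued proposal. `saturating_mul` scales both
// Weight dimensions, but the ref_time part here is zero.
use frame_support::weights::Weight;

fn propose_proposed_pov_estimate(m: u64, p: u64) -> u64 {
    Weight::from_parts(0, 3714)
        .saturating_add(Weight::from_parts(0, 33).saturating_mul(m))
        .saturating_add(Weight::from_parts(0, 36).saturating_mul(p))
        .proof_size()
}

fn main() {
    // e.g. 100 collective members and 20 queued proposals
    println!("estimated PoV contribution: {} bytes", propose_proposed_pov_estimate(100, 20));
}
```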
+ Weight::from_parts(26_092_578, 0) + .saturating_add(Weight::from_parts(0, 4235)) + // Standard Error: 1_785 + .saturating_add(Weight::from_parts(67_298, 0).saturating_mul(m.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) + } + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:0 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `m` is `[4, 100]`. + /// The range of component `p` is `[1, 100]`. + fn close_early_disapproved(m: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `360 + m * (64 ±0) + p * (36 ±0)` + // Estimated: `3805 + m * (65 ±0) + p * (36 ±0)` + // Minimum execution time: 27_543_000 picoseconds. + Weight::from_parts(26_505_473, 0) + .saturating_add(Weight::from_parts(0, 3805)) + // Standard Error: 1_054 + .saturating_add(Weight::from_parts(35_295, 0).saturating_mul(m.into())) + // Standard Error: 1_028 + .saturating_add(Weight::from_parts(190_508, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) + } + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:1 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// The range of component `b` is `[2, 1024]`. + /// The range of component `m` is `[4, 100]`. + /// The range of component `p` is `[1, 100]`. + fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `662 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` + // Estimated: `3979 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` + // Minimum execution time: 40_375_000 picoseconds. 
+ Weight::from_parts(34_081_294, 0) + .saturating_add(Weight::from_parts(0, 3979)) + // Standard Error: 196 + .saturating_add(Weight::from_parts(3_796, 0).saturating_mul(b.into())) + // Standard Error: 2_072 + .saturating_add(Weight::from_parts(50_954, 0).saturating_mul(m.into())) + // Standard Error: 2_020 + .saturating_add(Weight::from_parts(246_000, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(0, 66).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) + } + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:1 w:0) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:0 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `m` is `[4, 100]`. + /// The range of component `p` is `[1, 100]`. + fn close_disapproved(m: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `458 + m * (48 ±0) + p * (36 ±0)` + // Estimated: `3898 + m * (49 ±0) + p * (36 ±0)` + // Minimum execution time: 28_793_000 picoseconds. + Weight::from_parts(29_656_832, 0) + .saturating_add(Weight::from_parts(0, 3898)) + // Standard Error: 1_214 + .saturating_add(Weight::from_parts(22_148, 0).saturating_mul(m.into())) + // Standard Error: 1_184 + .saturating_add(Weight::from_parts(189_860, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 49).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) + } + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:1 w:0) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:1 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// The range of component `b` is `[2, 1024]`. + /// The range of component `m` is `[4, 100]`. + /// The range of component `p` is `[1, 100]`. + fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `682 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` + // Estimated: `3999 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` + // Minimum execution time: 40_887_000 picoseconds. 
+ Weight::from_parts(39_529_567, 0) + .saturating_add(Weight::from_parts(0, 3999)) + // Standard Error: 191 + .saturating_add(Weight::from_parts(2_802, 0).saturating_mul(b.into())) + // Standard Error: 2_021 + .saturating_add(Weight::from_parts(35_956, 0).saturating_mul(m.into())) + // Standard Error: 1_970 + .saturating_add(Weight::from_parts(235_154, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(0, 66).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) + } + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Voting` (r:0 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:0 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `p` is `[1, 100]`. + fn disapprove_proposal(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `189 + p * (32 ±0)` + // Estimated: `1674 + p * (32 ±0)` + // Minimum execution time: 14_040_000 picoseconds. + Weight::from_parts(15_075_964, 0) + .saturating_add(Weight::from_parts(0, 1674)) + // Standard Error: 854 + .saturating_add(Weight::from_parts(159_597, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 32).saturating_mul(p.into())) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collective_content.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collective_content.rs new file mode 100644 index 00000000000..6be94db22db --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collective_content.rs @@ -0,0 +1,93 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_collective_content` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-18, STEPS: `10`, REPEAT: `3`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/debug/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --steps=10 +// --repeat=3 +// --pallet=pallet_collective_content +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_collective_content`. +pub struct WeightInfo(PhantomData); +impl pallet_collective_content::WeightInfo for WeightInfo { + /// Storage: `AmbassadorContent::Charter` (r:0 w:1) + /// Proof: `AmbassadorContent::Charter` (`max_values`: Some(1), `max_size`: Some(70), added: 565, mode: `MaxEncodedLen`) + fn set_charter() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 99_000_000 picoseconds. + Weight::from_parts(99_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorContent::AnnouncementsCount` (r:1 w:1) + /// Proof: `AmbassadorContent::AnnouncementsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorContent::NextAnnouncementExpireAt` (r:1 w:1) + /// Proof: `AmbassadorContent::NextAnnouncementExpireAt` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorContent::Announcements` (r:0 w:1) + /// Proof: `AmbassadorContent::Announcements` (`max_values`: None, `max_size`: Some(90), added: 2565, mode: `MaxEncodedLen`) + fn announce() -> Weight { + // Proof Size summary in bytes: + // Measured: `285` + // Estimated: `3507` + // Minimum execution time: 273_000_000 picoseconds. + Weight::from_parts(278_000_000, 0) + .saturating_add(Weight::from_parts(0, 3507)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorContent::Announcements` (r:1 w:1) + /// Proof: `AmbassadorContent::Announcements` (`max_values`: None, `max_size`: Some(90), added: 2565, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorContent::AnnouncementsCount` (r:1 w:1) + /// Proof: `AmbassadorContent::AnnouncementsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn remove_announcement() -> Weight { + // Proof Size summary in bytes: + // Measured: `450` + // Estimated: `3555` + // Minimum execution time: 326_000_000 picoseconds. 
+ Weight::from_parts(338_000_000, 0) + .saturating_add(Weight::from_parts(0, 3555)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_ambassador_core.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_ambassador_core.rs new file mode 100644 index 00000000000..f40940a8b25 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_ambassador_core.rs @@ -0,0 +1,223 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_core_fellowship` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_core_fellowship +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_core_fellowship`. +pub struct WeightInfo(PhantomData); +impl pallet_core_fellowship::WeightInfo for WeightInfo { + /// Storage: `AmbassadorCore::Params` (r:0 w:1) + /// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + fn set_params() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 11_000_000 picoseconds. 
+ Weight::from_parts(11_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorCore::Member` (r:1 w:1) + /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::Members` (r:1 w:1) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::Params` (r:1 w:0) + /// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IdToIndex` (r:1 w:0) + /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::MemberEvidence` (r:1 w:1) + /// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + fn bump_offboard() -> Weight { + // Proof Size summary in bytes: + // Measured: `66011` + // Estimated: `69046` + // Minimum execution time: 96_000_000 picoseconds. + Weight::from_parts(111_000_000, 0) + .saturating_add(Weight::from_parts(0, 69046)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AmbassadorCore::Member` (r:1 w:1) + /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::Members` (r:1 w:1) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::Params` (r:1 w:0) + /// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IdToIndex` (r:1 w:0) + /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::MemberEvidence` (r:1 w:1) + /// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + fn bump_demote() -> Weight { + // Proof Size summary in bytes: + // Measured: `66121` + // Estimated: `69046` + // Minimum execution time: 99_000_000 picoseconds. + Weight::from_parts(116_000_000, 0) + .saturating_add(Weight::from_parts(0, 69046)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::Member` (r:1 w:1) + /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + fn set_active() -> Weight { + // Proof Size summary in bytes: + // Measured: `360` + // Estimated: `3514` + // Minimum execution time: 21_000_000 picoseconds. 
+ Weight::from_parts(22_000_000, 0) + .saturating_add(Weight::from_parts(0, 3514)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorCore::Member` (r:1 w:1) + /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::Members` (r:1 w:1) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IndexToId` (r:0 w:1) + /// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IdToIndex` (r:0 w:1) + /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + fn induct() -> Weight { + // Proof Size summary in bytes: + // Measured: `118` + // Estimated: `3514` + // Minimum execution time: 36_000_000 picoseconds. + Weight::from_parts(36_000_000, 0) + .saturating_add(Weight::from_parts(0, 3514)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:1) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::Member` (r:1 w:1) + /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::Params` (r:1 w:0) + /// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::MemberEvidence` (r:1 w:1) + /// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IndexToId` (r:0 w:1) + /// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IdToIndex` (r:0 w:1) + /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + fn promote() -> Weight { + // Proof Size summary in bytes: + // Measured: `65989` + // Estimated: `69046` + // Minimum execution time: 95_000_000 picoseconds. 
+ Weight::from_parts(110_000_000, 0) + .saturating_add(Weight::from_parts(0, 69046)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::Member` (r:1 w:1) + /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::MemberEvidence` (r:0 w:1) + /// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + fn offboard() -> Weight { + // Proof Size summary in bytes: + // Measured: `331` + // Estimated: `3514` + // Minimum execution time: 21_000_000 picoseconds. + Weight::from_parts(22_000_000, 0) + .saturating_add(Weight::from_parts(0, 3514)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorCore::Member` (r:1 w:1) + /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + fn import() -> Weight { + // Proof Size summary in bytes: + // Measured: `285` + // Estimated: `3514` + // Minimum execution time: 20_000_000 picoseconds. + Weight::from_parts(21_000_000, 0) + .saturating_add(Weight::from_parts(0, 3514)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::Member` (r:1 w:1) + /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::MemberEvidence` (r:1 w:1) + /// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + fn approve() -> Weight { + // Proof Size summary in bytes: + // Measured: `65967` + // Estimated: `69046` + // Minimum execution time: 78_000_000 picoseconds. + Weight::from_parts(104_000_000, 0) + .saturating_add(Weight::from_parts(0, 69046)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorCore::Member` (r:1 w:0) + /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::MemberEvidence` (r:1 w:1) + /// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + fn submit_evidence() -> Weight { + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `69046` + // Minimum execution time: 43_000_000 picoseconds. 
+ Weight::from_parts(44_000_000, 0) + .saturating_add(Weight::from_parts(0, 69046)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_fellowship_core.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_fellowship_core.rs new file mode 100644 index 00000000000..471ee82ead7 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_fellowship_core.rs @@ -0,0 +1,222 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_core_fellowship` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_core_fellowship +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_core_fellowship`. +pub struct WeightInfo(PhantomData); +impl pallet_core_fellowship::WeightInfo for WeightInfo { + /// Storage: `FellowshipCore::Params` (r:0 w:1) + /// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + fn set_params() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 11_000_000 picoseconds. 
+ Weight::from_parts(12_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipCore::Member` (r:1 w:1) + /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::Members` (r:1 w:1) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::Params` (r:1 w:0) + /// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IdToIndex` (r:1 w:0) + /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::MemberEvidence` (r:1 w:1) + /// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + fn bump_offboard() -> Weight { + // Proof Size summary in bytes: + // Measured: `66144` + // Estimated: `69046` + // Minimum execution time: 109_000_000 picoseconds. + Weight::from_parts(125_000_000, 0) + .saturating_add(Weight::from_parts(0, 69046)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipCore::Member` (r:1 w:1) + /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::Members` (r:1 w:1) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::Params` (r:1 w:0) + /// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IdToIndex` (r:1 w:0) + /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::MemberEvidence` (r:1 w:1) + /// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + fn bump_demote() -> Weight { + // Proof Size summary in bytes: + // Measured: `66254` + // Estimated: `69046` + // Minimum execution time: 112_000_000 picoseconds. + Weight::from_parts(114_000_000, 0) + .saturating_add(Weight::from_parts(0, 69046)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipCollective::Members` (r:1 w:0) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::Member` (r:1 w:1) + /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + fn set_active() -> Weight { + // Proof Size summary in bytes: + // Measured: `493` + // Estimated: `3514` + // Minimum execution time: 22_000_000 picoseconds. 
+ Weight::from_parts(27_000_000, 0) + .saturating_add(Weight::from_parts(0, 3514)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipCore::Member` (r:1 w:1) + /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::Members` (r:1 w:1) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IndexToId` (r:0 w:1) + /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IdToIndex` (r:0 w:1) + /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + fn induct() -> Weight { + // Proof Size summary in bytes: + // Measured: `251` + // Estimated: `3514` + // Minimum execution time: 35_000_000 picoseconds. + Weight::from_parts(36_000_000, 0) + .saturating_add(Weight::from_parts(0, 3514)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `FellowshipCollective::Members` (r:1 w:1) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::Member` (r:1 w:1) + /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::Params` (r:1 w:0) + /// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::MemberEvidence` (r:1 w:1) + /// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IndexToId` (r:0 w:1) + /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IdToIndex` (r:0 w:1) + /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + fn promote() -> Weight { + // Proof Size summary in bytes: + // Measured: `66122` + // Estimated: `69046` + // Minimum execution time: 97_000_000 picoseconds. 
+ Weight::from_parts(129_000_000, 0) + .saturating_add(Weight::from_parts(0, 69046)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `FellowshipCollective::Members` (r:1 w:0) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::Member` (r:1 w:1) + /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::MemberEvidence` (r:0 w:1) + /// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + fn offboard() -> Weight { + // Proof Size summary in bytes: + // Measured: `464` + // Estimated: `3514` + // Minimum execution time: 22_000_000 picoseconds. + Weight::from_parts(22_000_000, 0) + .saturating_add(Weight::from_parts(0, 3514)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipCore::Member` (r:1 w:1) + /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::Members` (r:1 w:0) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + fn import() -> Weight { + // Proof Size summary in bytes: + // Measured: `418` + // Estimated: `3514` + // Minimum execution time: 20_000_000 picoseconds. + Weight::from_parts(24_000_000, 0) + .saturating_add(Weight::from_parts(0, 3514)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipCollective::Members` (r:1 w:0) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::Member` (r:1 w:1) + /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::MemberEvidence` (r:1 w:1) + /// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + fn approve() -> Weight { + // Proof Size summary in bytes: + // Measured: `66100` + // Estimated: `69046` + // Minimum execution time: 89_000_000 picoseconds. + Weight::from_parts(119_000_000, 0) + .saturating_add(Weight::from_parts(0, 69046)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipCore::Member` (r:1 w:0) + /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::MemberEvidence` (r:1 w:1) + /// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + fn submit_evidence() -> Weight { + // Proof Size summary in bytes: + // Measured: `184` + // Estimated: `69046` + // Minimum execution time: 43_000_000 picoseconds. 
+ Weight::from_parts(52_000_000, 0) + .saturating_add(Weight::from_parts(0, 69046)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_message_queue.rs new file mode 100644 index 00000000000..4bd71c4e7d4 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_message_queue.rs @@ -0,0 +1,179 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_message_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-03-24, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("collectives-polkadot-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --chain +// collectives-polkadot-dev +// --pallet +// pallet_message_queue +// --extrinsic +// * +// --execution +// wasm +// --wasm-execution +// compiled +// --output +// parachains/runtimes/collectives/collectives-polkadot/src/weights + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use sp_std::marker::PhantomData; + +/// Weight functions for `pallet_message_queue`. +pub struct WeightInfo(PhantomData); +impl pallet_message_queue::WeightInfo for WeightInfo { + /// Storage: MessageQueue ServiceHead (r:1 w:0) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:2 w:2) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn ready_ring_knit() -> Weight { + // Proof Size summary in bytes: + // Measured: `189` + // Estimated: `7534` + // Minimum execution time: 11_440_000 picoseconds. + Weight::from_parts(11_440_000, 0) + .saturating_add(Weight::from_parts(0, 7534)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:2 w:2) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + fn ready_ring_unknit() -> Weight { + // Proof Size summary in bytes: + // Measured: `184` + // Estimated: `7534` + // Minimum execution time: 11_077_000 picoseconds. 
+ Weight::from_parts(11_077_000, 0) + .saturating_add(Weight::from_parts(0, 7534)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn service_queue_base() -> Weight { + // Proof Size summary in bytes: + // Measured: `6` + // Estimated: `3517` + // Minimum execution time: 3_977_000 picoseconds. + Weight::from_parts(3_977_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn service_page_base_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `72` + // Estimated: `69050` + // Minimum execution time: 4_831_000 picoseconds. + Weight::from_parts(4_831_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn service_page_base_no_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `72` + // Estimated: `69050` + // Minimum execution time: 5_192_000 picoseconds. + Weight::from_parts(5_192_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn service_page_item() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 58_750_000 picoseconds. + Weight::from_parts(58_750_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:1 w:0) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn bump_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `99` + // Estimated: `5007` + // Minimum execution time: 5_107_000 picoseconds. + Weight::from_parts(5_107_000, 0) + .saturating_add(Weight::from_parts(0, 5007)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn reap_page() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 46_814_000 picoseconds. 
+ Weight::from_parts(46_814_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn execute_overweight_page_removed() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 52_510_000 picoseconds. + Weight::from_parts(52_510_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn execute_overweight_page_updated() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 71_930_000 picoseconds. + Weight::from_parts(71_930_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_multisig.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_multisig.rs new file mode 100644 index 00000000000..a7827b72009 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_multisig.rs @@ -0,0 +1,164 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_multisig` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_multisig +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_multisig`. +pub struct WeightInfo(PhantomData); +impl pallet_multisig::WeightInfo for WeightInfo { + /// The range of component `z` is `[0, 10000]`. + fn as_multi_threshold_1(z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 13_288_000 picoseconds. + Weight::from_parts(14_235_741, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 5 + .saturating_add(Weight::from_parts(500, 0).saturating_mul(z.into())) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// The range of component `s` is `[2, 100]`. + /// The range of component `z` is `[0, 10000]`. + fn as_multi_create(s: u32, z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `328 + s * (2 ±0)` + // Estimated: `6811` + // Minimum execution time: 44_865_000 picoseconds. + Weight::from_parts(33_468_056, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 1_513 + .saturating_add(Weight::from_parts(130_544, 0).saturating_mul(s.into())) + // Standard Error: 14 + .saturating_add(Weight::from_parts(1_422, 0).saturating_mul(z.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// The range of component `s` is `[3, 100]`. + /// The range of component `z` is `[0, 10000]`. + fn as_multi_approve(s: u32, z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `348` + // Estimated: `6811` + // Minimum execution time: 29_284_000 picoseconds. + Weight::from_parts(18_708_967, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 916 + .saturating_add(Weight::from_parts(119_202, 0).saturating_mul(s.into())) + // Standard Error: 8 + .saturating_add(Weight::from_parts(1_447, 0).saturating_mul(z.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `s` is `[2, 100]`. + /// The range of component `z` is `[0, 10000]`. + fn as_multi_complete(s: u32, z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `451 + s * (33 ±0)` + // Estimated: `6811` + // Minimum execution time: 49_462_000 picoseconds. 
+ Weight::from_parts(34_470_286, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 1_738 + .saturating_add(Weight::from_parts(178_227, 0).saturating_mul(s.into())) + // Standard Error: 17 + .saturating_add(Weight::from_parts(1_644, 0).saturating_mul(z.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// The range of component `s` is `[2, 100]`. + fn approve_as_multi_create(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `329 + s * (2 ±0)` + // Estimated: `6811` + // Minimum execution time: 30_749_000 picoseconds. + Weight::from_parts(31_841_438, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 1_033 + .saturating_add(Weight::from_parts(123_126, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// The range of component `s` is `[2, 100]`. + fn approve_as_multi_approve(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `348` + // Estimated: `6811` + // Minimum execution time: 17_436_000 picoseconds. + Weight::from_parts(18_036_002, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 829 + .saturating_add(Weight::from_parts(109_450, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// The range of component `s` is `[2, 100]`. + fn cancel_as_multi(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `520 + s * (1 ±0)` + // Estimated: `6811` + // Minimum execution time: 31_532_000 picoseconds. + Weight::from_parts(32_818_015, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 977 + .saturating_add(Weight::from_parts(123_121, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_preimage.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_preimage.rs new file mode 100644 index 00000000000..9b45c875818 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_preimage.rs @@ -0,0 +1,232 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_preimage` +//! +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_preimage +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_preimage`. +pub struct WeightInfo(PhantomData); +impl pallet_preimage::WeightInfo for WeightInfo { + fn ensure_updated(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `193 + n * (91 ±0)` + // Estimated: `3593 + n * (2566 ±0)` + // Minimum execution time: 2_000_000 picoseconds. + Weight::from_parts(2_000_000, 3593) + // Standard Error: 13_720 + .saturating_add(Weight::from_parts(17_309_199, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes(1_u64)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2566).saturating_mul(n.into())) + } + + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 4194304]`. + fn note_preimage(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `177` + // Estimated: `3556` + // Minimum execution time: 29_323_000 picoseconds. + Weight::from_parts(29_793_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + // Standard Error: 5 + .saturating_add(Weight::from_parts(2_504, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 4194304]`. + fn note_requested_preimage(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `140` + // Estimated: `3556` + // Minimum execution time: 15_581_000 picoseconds. 
+ Weight::from_parts(15_659_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + // Standard Error: 4 + .saturating_add(Weight::from_parts(2_500, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 4194304]`. + fn note_no_deposit_preimage(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `140` + // Estimated: `3556` + // Minimum execution time: 15_028_000 picoseconds. + Weight::from_parts(15_150_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + // Standard Error: 6 + .saturating_add(Weight::from_parts(2_560, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) + fn unnote_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `323` + // Estimated: `3556` + // Minimum execution time: 55_113_000 picoseconds. + Weight::from_parts(59_127_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) + fn unnote_no_deposit_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `178` + // Estimated: `3556` + // Minimum execution time: 38_033_000 picoseconds. + Weight::from_parts(41_203_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn request_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `222` + // Estimated: `3556` + // Minimum execution time: 31_482_000 picoseconds. + Weight::from_parts(34_726_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn request_no_deposit_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `178` + // Estimated: `3556` + // Minimum execution time: 20_724_000 picoseconds. 
+ Weight::from_parts(22_928_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn request_unnoted_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `3556` + // Minimum execution time: 27_015_000 picoseconds. + Weight::from_parts(29_240_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn request_requested_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `140` + // Estimated: `3556` + // Minimum execution time: 10_712_000 picoseconds. + Weight::from_parts(11_317_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) + fn unrequest_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `178` + // Estimated: `3556` + // Minimum execution time: 34_528_000 picoseconds. + Weight::from_parts(35_982_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn unrequest_unnoted_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `140` + // Estimated: `3556` + // Minimum execution time: 11_059_000 picoseconds. + Weight::from_parts(12_458_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn unrequest_multi_referenced_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `140` + // Estimated: `3556` + // Minimum execution time: 11_502_000 picoseconds. + Weight::from_parts(12_180_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_proxy.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_proxy.rs new file mode 100644 index 00000000000..59d9f912bf1 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_proxy.rs @@ -0,0 +1,225 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_proxy` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_proxy +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_proxy`. +pub struct WeightInfo(PhantomData); +impl pallet_proxy::WeightInfo for WeightInfo { + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 15_597_000 picoseconds. + Weight::from_parts(16_231_993, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 1_665 + .saturating_add(Weight::from_parts(29_818, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn proxy_announced(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `454 + a * (68 ±0) + p * (37 ±0)` + // Estimated: `5698` + // Minimum execution time: 36_685_000 picoseconds. 
+ Weight::from_parts(36_376_358, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 3_003 + .saturating_add(Weight::from_parts(133_776, 0).saturating_mul(a.into())) + // Standard Error: 3_103 + .saturating_add(Weight::from_parts(60_315, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn remove_announcement(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `369 + a * (68 ±0)` + // Estimated: `5698` + // Minimum execution time: 23_835_000 picoseconds. + Weight::from_parts(24_154_219, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 1_580 + .saturating_add(Weight::from_parts(125_884, 0).saturating_mul(a.into())) + // Standard Error: 1_632 + .saturating_add(Weight::from_parts(21_563, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn reject_announcement(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `369 + a * (68 ±0)` + // Estimated: `5698` + // Minimum execution time: 23_997_000 picoseconds. + Weight::from_parts(24_301_638, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 1_658 + .saturating_add(Weight::from_parts(133_005, 0).saturating_mul(a.into())) + // Standard Error: 1_713 + .saturating_add(Weight::from_parts(20_237, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn announce(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `386 + a * (68 ±0) + p * (37 ±0)` + // Estimated: `5698` + // Minimum execution time: 33_604_000 picoseconds. 
+ Weight::from_parts(33_322_880, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 1_840 + .saturating_add(Weight::from_parts(114_037, 0).saturating_mul(a.into())) + // Standard Error: 1_901 + .saturating_add(Weight::from_parts(45_629, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn add_proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 24_634_000 picoseconds. + Weight::from_parts(25_509_118, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 2_278 + .saturating_add(Weight::from_parts(38_401, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn remove_proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 24_855_000 picoseconds. + Weight::from_parts(25_753_505, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 1_819 + .saturating_add(Weight::from_parts(44_357, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn remove_proxies(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 22_211_000 picoseconds. + Weight::from_parts(23_094_124, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 2_597 + .saturating_add(Weight::from_parts(36_725, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn create_pure(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `139` + // Estimated: `4706` + // Minimum execution time: 26_764_000 picoseconds. + Weight::from_parts(27_667_535, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 1_111 + .saturating_add(Weight::from_parts(3_422, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[0, 30]`. + fn kill_pure(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `164 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 22_632_000 picoseconds. 
+ Weight::from_parts(23_678_772, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 2_136 + .saturating_add(Weight::from_parts(26_492, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_ranked_collective_ambassador_collective.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_ranked_collective_ambassador_collective.rs new file mode 100644 index 00000000000..a6372c4b89d --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_ranked_collective_ambassador_collective.rs @@ -0,0 +1,177 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_ranked_collective` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_ranked_collective +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_ranked_collective`. +pub struct WeightInfo(PhantomData); +impl pallet_ranked_collective::WeightInfo for WeightInfo { + /// Storage: `AmbassadorCollective::Members` (r:1 w:1) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IndexToId` (r:0 w:1) + /// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IdToIndex` (r:0 w:1) + /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + fn add_member() -> Weight { + // Proof Size summary in bytes: + // Measured: `42` + // Estimated: `3507` + // Minimum execution time: 21_000_000 picoseconds. 
+ Weight::from_parts(23_000_000, 0) + .saturating_add(Weight::from_parts(0, 3507)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:1) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:11 w:11) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IdToIndex` (r:11 w:11) + /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IndexToId` (r:11 w:11) + /// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 10]`. + /// The range of component `r` is `[0, 10]`. + fn remove_member(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `508 + r * (281 ±0)` + // Estimated: `3519 + r * (2529 ±0)` + // Minimum execution time: 34_000_000 picoseconds. + Weight::from_parts(36_500_000, 0) + .saturating_add(Weight::from_parts(0, 3519)) + // Standard Error: 158_113 + .saturating_add(Weight::from_parts(16_000_000, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 2529).saturating_mul(r.into())) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:1) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IndexToId` (r:0 w:1) + /// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IdToIndex` (r:0 w:1) + /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 10]`. + /// The range of component `r` is `[0, 10]`. + fn promote_member(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `210 + r * (17 ±0)` + // Estimated: `3507` + // Minimum execution time: 25_000_000 picoseconds. 
+ Weight::from_parts(26_000_000, 0) + .saturating_add(Weight::from_parts(0, 3507)) + // Standard Error: 180_277 + .saturating_add(Weight::from_parts(650_000, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:1) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IdToIndex` (r:1 w:1) + /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IndexToId` (r:1 w:1) + /// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 10]`. + /// The range of component `r` is `[0, 10]`. + fn demote_member(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `508 + r * (71 ±0)` + // Estimated: `3519` + // Minimum execution time: 34_000_000 picoseconds. + Weight::from_parts(36_500_000, 0) + .saturating_add(Weight::from_parts(0, 3519)) + // Standard Error: 335_410 + .saturating_add(Weight::from_parts(550_000, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::Voting` (r:1 w:1) + /// Proof: `AmbassadorCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn vote() -> Weight { + // Proof Size summary in bytes: + // Measured: `566` + // Estimated: `317568` + // Minimum execution time: 57_000_000 picoseconds. + Weight::from_parts(60_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::VotingCleanup` (r:1 w:0) + /// Proof: `AmbassadorCollective::VotingCleanup` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::Voting` (r:100 w:100) + /// Proof: `AmbassadorCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 100]`. + /// The range of component `n` is `[0, 100]`. + fn cleanup_poll(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `209 + n * (52 ±0)` + // Estimated: `4365 + n * (2550 ±0)` + // Minimum execution time: 18_000_000 picoseconds. 
+ Weight::from_parts(18_500_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + // Standard Error: 11_180 + .saturating_add(Weight::from_parts(1_335_000, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2550).saturating_mul(n.into())) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_ranked_collective_fellowship_collective.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_ranked_collective_fellowship_collective.rs new file mode 100644 index 00000000000..9c773c56ac3 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_ranked_collective_fellowship_collective.rs @@ -0,0 +1,176 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_ranked_collective` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_ranked_collective +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_ranked_collective`. 
+pub struct WeightInfo(PhantomData); +impl pallet_ranked_collective::WeightInfo for WeightInfo { + /// Storage: `FellowshipCollective::Members` (r:1 w:1) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IndexToId` (r:0 w:1) + /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IdToIndex` (r:0 w:1) + /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + fn add_member() -> Weight { + // Proof Size summary in bytes: + // Measured: `142` + // Estimated: `3507` + // Minimum execution time: 21_000_000 picoseconds. + Weight::from_parts(22_000_000, 0) + .saturating_add(Weight::from_parts(0, 3507)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipCollective::Members` (r:1 w:1) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:11 w:11) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IdToIndex` (r:11 w:11) + /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IndexToId` (r:11 w:11) + /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 10]`. + /// The range of component `r` is `[0, 10]`. + fn remove_member(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `608 + r * (281 ±0)` + // Estimated: `3519 + r * (2529 ±0)` + // Minimum execution time: 35_000_000 picoseconds. + Weight::from_parts(36_500_000, 0) + .saturating_add(Weight::from_parts(0, 3519)) + // Standard Error: 254_950 + .saturating_add(Weight::from_parts(15_900_000, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 2529).saturating_mul(r.into())) + } + /// Storage: `FellowshipCollective::Members` (r:1 w:1) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IndexToId` (r:0 w:1) + /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IdToIndex` (r:0 w:1) + /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 10]`. 
+ /// The range of component `r` is `[0, 10]`. + fn promote_member(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `310 + r * (17 ±0)` + // Estimated: `3507` + // Minimum execution time: 25_000_000 picoseconds. + Weight::from_parts(25_500_000, 0) + .saturating_add(Weight::from_parts(0, 3507)) + // Standard Error: 70_710 + .saturating_add(Weight::from_parts(400_000, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipCollective::Members` (r:1 w:1) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IdToIndex` (r:1 w:1) + /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IndexToId` (r:1 w:1) + /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 10]`. + /// The range of component `r` is `[0, 10]`. + fn demote_member(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `608 + r * (71 ±0)` + // Estimated: `3519` + // Minimum execution time: 35_000_000 picoseconds. + Weight::from_parts(37_500_000, 0) + .saturating_add(Weight::from_parts(0, 3519)) + // Standard Error: 150_000 + .saturating_add(Weight::from_parts(350_000, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipCollective::Members` (r:1 w:0) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::Voting` (r:1 w:1) + /// Proof: `FellowshipCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn vote() -> Weight { + // Proof Size summary in bytes: + // Measured: `700` + // Estimated: `317568` + // Minimum execution time: 57_000_000 picoseconds. + Weight::from_parts(57_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::VotingCleanup` (r:1 w:0) + /// Proof: `FellowshipCollective::VotingCleanup` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::Voting` (r:100 w:100) + /// Proof: `FellowshipCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 100]`. 
+ /// The range of component `n` is `[0, 100]`. + fn cleanup_poll(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `343 + n * (52 ±0)` + // Estimated: `4365 + n * (2550 ±0)` + // Minimum execution time: 18_000_000 picoseconds. + Weight::from_parts(19_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + // Standard Error: 25_000 + .saturating_add(Weight::from_parts(1_395_000, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2550).saturating_mul(n.into())) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_referenda_ambassador_referenda.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_referenda_ambassador_referenda.rs new file mode 100644 index 00000000000..fdc451c5d31 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_referenda_ambassador_referenda.rs @@ -0,0 +1,536 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_referenda` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_referenda +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_referenda`. 
+pub struct WeightInfo(PhantomData); +impl pallet_referenda::WeightInfo for WeightInfo { + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::ReferendumCount` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:0 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + fn submit() -> Weight { + // Proof Size summary in bytes: + // Measured: `255` + // Estimated: `159279` + // Minimum execution time: 32_000_000 picoseconds. + Weight::from_parts(34_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn place_decision_deposit_preparing() -> Weight { + // Proof Size summary in bytes: + // Measured: `366` + // Estimated: `317568` + // Minimum execution time: 63_000_000 picoseconds. + Weight::from_parts(68_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:0) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn place_decision_deposit_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `1165` + // Estimated: `159279` + // Minimum execution time: 97_000_000 picoseconds. 
+ Weight::from_parts(123_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:0) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn place_decision_deposit_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `1173` + // Estimated: `159279` + // Minimum execution time: 104_000_000 picoseconds. + Weight::from_parts(111_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:1) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn place_decision_deposit_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `702` + // Estimated: `317568` + // Minimum execution time: 140_000_000 picoseconds. + Weight::from_parts(150_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:1) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn place_decision_deposit_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `601` + // Estimated: `317568` + // Minimum execution time: 81_000_000 picoseconds. 
+ Weight::from_parts(82_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + fn refund_decision_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `317` + // Estimated: `4365` + // Minimum execution time: 38_000_000 picoseconds. + Weight::from_parts(38_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + fn refund_submission_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `167` + // Estimated: `4365` + // Minimum execution time: 17_000_000 picoseconds. + Weight::from_parts(18_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn cancel() -> Weight { + // Proof Size summary in bytes: + // Measured: `311` + // Estimated: `317568` + // Minimum execution time: 44_000_000 picoseconds. 
+ Weight::from_parts(45_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AmbassadorReferenda::MetadataOf` (r:1 w:0) + /// Proof: `AmbassadorReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn kill() -> Weight { + // Proof Size summary in bytes: + // Measured: `626` + // Estimated: `317568` + // Minimum execution time: 183_000_000 picoseconds. + Weight::from_parts(187_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(11)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:0) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:1) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + fn one_fewer_deciding_queue_empty() -> Weight { + // Proof Size summary in bytes: + // Measured: `140` + // Estimated: `3636` + // Minimum execution time: 12_000_000 picoseconds. 
+ Weight::from_parts(12_000_000, 0) + .saturating_add(Weight::from_parts(0, 3636)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn one_fewer_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `1412` + // Estimated: `159279` + // Minimum execution time: 88_000_000 picoseconds. + Weight::from_parts(97_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn one_fewer_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `1412` + // Estimated: `159279` + // Minimum execution time: 87_000_000 picoseconds. + Weight::from_parts(92_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + fn nudge_referendum_requeued_insertion() -> Weight { + // Proof Size summary in bytes: + // Measured: `935` + // Estimated: `4365` + // Minimum execution time: 43_000_000 picoseconds. 
+ Weight::from_parts(46_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + fn nudge_referendum_requeued_slide() -> Weight { + // Proof Size summary in bytes: + // Measured: `935` + // Estimated: `4365` + // Minimum execution time: 39_000_000 picoseconds. + Weight::from_parts(43_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:0) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + fn nudge_referendum_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `951` + // Estimated: `4365` + // Minimum execution time: 48_000_000 picoseconds. + Weight::from_parts(50_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:0) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + fn nudge_referendum_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `959` + // Estimated: `4365` + // Minimum execution time: 42_000_000 picoseconds. + Weight::from_parts(48_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_no_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `263` + // Estimated: `159279` + // Minimum execution time: 28_000_000 picoseconds. 
+ Weight::from_parts(30_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_preparing() -> Weight { + // Proof Size summary in bytes: + // Measured: `311` + // Estimated: `159279` + // Minimum execution time: 26_000_000 picoseconds. + Weight::from_parts(28_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + fn nudge_referendum_timed_out() -> Weight { + // Proof Size summary in bytes: + // Measured: `208` + // Estimated: `4365` + // Minimum execution time: 19_000_000 picoseconds. + Weight::from_parts(20_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:1) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `546` + // Estimated: `159279` + // Minimum execution time: 42_000_000 picoseconds. + Weight::from_parts(46_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:1) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `647` + // Estimated: `159279` + // Minimum execution time: 87_000_000 picoseconds. 
+ Weight::from_parts(93_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `700` + // Estimated: `159279` + // Minimum execution time: 100_000_000 picoseconds. + Weight::from_parts(120_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_end_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `683` + // Estimated: `159279` + // Minimum execution time: 90_000_000 picoseconds. + Weight::from_parts(100_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_continue_not_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `700` + // Estimated: `159279` + // Minimum execution time: 77_000_000 picoseconds. 
+ Weight::from_parts(82_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_continue_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `704` + // Estimated: `159279` + // Minimum execution time: 68_000_000 picoseconds. + Weight::from_parts(77_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Lookup` (r:1 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + fn nudge_referendum_approved() -> Weight { + // Proof Size summary in bytes: + // Measured: `704` + // Estimated: `317568` + // Minimum execution time: 99_000_000 picoseconds. + Weight::from_parts(104_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_rejected() -> Weight { + // Proof Size summary in bytes: + // Measured: `700` + // Estimated: `159279` + // Minimum execution time: 87_000_000 picoseconds. 
+ Weight::from_parts(100_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::MetadataOf` (r:0 w:1) + /// Proof: `AmbassadorReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn set_some_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `419` + // Estimated: `4365` + // Minimum execution time: 23_000_000 picoseconds. + Weight::from_parts(25_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::MetadataOf` (r:1 w:1) + /// Proof: `AmbassadorReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn clear_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `285` + // Estimated: `4365` + // Minimum execution time: 20_000_000 picoseconds. + Weight::from_parts(21_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_referenda_fellowship_referenda.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_referenda_fellowship_referenda.rs new file mode 100644 index 00000000000..63f68833795 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_referenda_fellowship_referenda.rs @@ -0,0 +1,535 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_referenda` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_referenda +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_referenda`. +pub struct WeightInfo(PhantomData); +impl pallet_referenda::WeightInfo for WeightInfo { + /// Storage: `FellowshipCollective::Members` (r:1 w:0) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::ReferendumCount` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:0 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + fn submit() -> Weight { + // Proof Size summary in bytes: + // Measured: `389` + // Estimated: `159279` + // Minimum execution time: 34_000_000 picoseconds. + Weight::from_parts(36_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn place_decision_deposit_preparing() -> Weight { + // Proof Size summary in bytes: + // Measured: `400` + // Estimated: `317568` + // Minimum execution time: 64_000_000 picoseconds. + Weight::from_parts(67_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:0) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn place_decision_deposit_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `2038` + // Estimated: `159279` + // Minimum execution time: 99_000_000 picoseconds. 
+ Weight::from_parts(109_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:0) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn place_decision_deposit_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `2079` + // Estimated: `159279` + // Minimum execution time: 101_000_000 picoseconds. + Weight::from_parts(111_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:1) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn place_decision_deposit_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `836` + // Estimated: `317568` + // Minimum execution time: 135_000_000 picoseconds. + Weight::from_parts(153_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:1) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn place_decision_deposit_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `735` + // Estimated: `317568` + // Minimum execution time: 78_000_000 picoseconds. 
+ Weight::from_parts(82_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + fn refund_decision_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `351` + // Estimated: `4365` + // Minimum execution time: 38_000_000 picoseconds. + Weight::from_parts(39_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + fn refund_submission_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `201` + // Estimated: `4365` + // Minimum execution time: 18_000_000 picoseconds. + Weight::from_parts(19_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn cancel() -> Weight { + // Proof Size summary in bytes: + // Measured: `345` + // Estimated: `317568` + // Minimum execution time: 45_000_000 picoseconds. 
+ Weight::from_parts(46_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `FellowshipReferenda::MetadataOf` (r:1 w:0) + /// Proof: `FellowshipReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn kill() -> Weight { + // Proof Size summary in bytes: + // Measured: `587` + // Estimated: `317568` + // Minimum execution time: 185_000_000 picoseconds. + Weight::from_parts(196_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(11)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:0) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:1) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + fn one_fewer_deciding_queue_empty() -> Weight { + // Proof Size summary in bytes: + // Measured: `174` + // Estimated: `4277` + // Minimum execution time: 12_000_000 picoseconds. 
+ Weight::from_parts(15_000_000, 0) + .saturating_add(Weight::from_parts(0, 4277)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn one_fewer_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `2452` + // Estimated: `159279` + // Minimum execution time: 82_000_000 picoseconds. + Weight::from_parts(90_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn one_fewer_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `2452` + // Estimated: `159279` + // Minimum execution time: 91_000_000 picoseconds. + Weight::from_parts(99_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + fn nudge_referendum_requeued_insertion() -> Weight { + // Proof Size summary in bytes: + // Measured: `1841` + // Estimated: `4365` + // Minimum execution time: 41_000_000 picoseconds. 
+ Weight::from_parts(44_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + fn nudge_referendum_requeued_slide() -> Weight { + // Proof Size summary in bytes: + // Measured: `1808` + // Estimated: `4365` + // Minimum execution time: 46_000_000 picoseconds. + Weight::from_parts(55_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:0) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + fn nudge_referendum_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `1824` + // Estimated: `4365` + // Minimum execution time: 49_000_000 picoseconds. + Weight::from_parts(53_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:0) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + fn nudge_referendum_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `1865` + // Estimated: `4365` + // Minimum execution time: 51_000_000 picoseconds. + Weight::from_parts(54_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_no_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `297` + // Estimated: `159279` + // Minimum execution time: 28_000_000 picoseconds. 
+ Weight::from_parts(30_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_preparing() -> Weight { + // Proof Size summary in bytes: + // Measured: `345` + // Estimated: `159279` + // Minimum execution time: 28_000_000 picoseconds. + Weight::from_parts(29_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + fn nudge_referendum_timed_out() -> Weight { + // Proof Size summary in bytes: + // Measured: `242` + // Estimated: `4365` + // Minimum execution time: 20_000_000 picoseconds. + Weight::from_parts(21_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:1) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `680` + // Estimated: `159279` + // Minimum execution time: 42_000_000 picoseconds. + Weight::from_parts(47_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:1) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `781` + // Estimated: `159279` + // Minimum execution time: 90_000_000 picoseconds. 
+ Weight::from_parts(95_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `834` + // Estimated: `159279` + // Minimum execution time: 84_000_000 picoseconds. + Weight::from_parts(93_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_end_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `817` + // Estimated: `159279` + // Minimum execution time: 88_000_000 picoseconds. + Weight::from_parts(98_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_continue_not_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `834` + // Estimated: `159279` + // Minimum execution time: 81_000_000 picoseconds. 
+ Weight::from_parts(93_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_continue_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `838` + // Estimated: `159279` + // Minimum execution time: 74_000_000 picoseconds. + Weight::from_parts(77_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Lookup` (r:1 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + fn nudge_referendum_approved() -> Weight { + // Proof Size summary in bytes: + // Measured: `838` + // Estimated: `317568` + // Minimum execution time: 105_000_000 picoseconds. + Weight::from_parts(123_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_rejected() -> Weight { + // Proof Size summary in bytes: + // Measured: `834` + // Estimated: `159279` + // Minimum execution time: 90_000_000 picoseconds. 
+ Weight::from_parts(100_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::MetadataOf` (r:0 w:1) + /// Proof: `FellowshipReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn set_some_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `453` + // Estimated: `4365` + // Minimum execution time: 24_000_000 picoseconds. + Weight::from_parts(24_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::MetadataOf` (r:1 w:1) + /// Proof: `FellowshipReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn clear_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `319` + // Estimated: `4365` + // Minimum execution time: 21_000_000 picoseconds. + Weight::from_parts(23_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_salary_ambassador_salary.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_salary_ambassador_salary.rs new file mode 100644 index 00000000000..0522420f2f5 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_salary_ambassador_salary.rs @@ -0,0 +1,190 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_salary` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_salary +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_salary`. +pub struct WeightInfo(PhantomData); +impl pallet_salary::WeightInfo for WeightInfo { + /// Storage: `AmbassadorSalary::Status` (r:1 w:1) + /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + fn init() -> Weight { + // Proof Size summary in bytes: + // Measured: `109` + // Estimated: `1541` + // Minimum execution time: 12_000_000 picoseconds. + Weight::from_parts(14_000_000, 0) + .saturating_add(Weight::from_parts(0, 1541)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorSalary::Status` (r:1 w:1) + /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + fn bump() -> Weight { + // Proof Size summary in bytes: + // Measured: `191` + // Estimated: `1541` + // Minimum execution time: 15_000_000 picoseconds. + Weight::from_parts(16_000_000, 0) + .saturating_add(Weight::from_parts(0, 1541)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorSalary::Status` (r:1 w:0) + /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorSalary::Claimant` (r:1 w:1) + /// Proof: `AmbassadorSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + fn induct() -> Weight { + // Proof Size summary in bytes: + // Measured: `400` + // Estimated: `3551` + // Minimum execution time: 23_000_000 picoseconds. + Weight::from_parts(23_000_000, 0) + .saturating_add(Weight::from_parts(0, 3551)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorSalary::Status` (r:1 w:1) + /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorSalary::Claimant` (r:1 w:1) + /// Proof: `AmbassadorSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + fn register() -> Weight { + // Proof Size summary in bytes: + // Measured: `467` + // Estimated: `3551` + // Minimum execution time: 27_000_000 picoseconds. 
+ Weight::from_parts(28_000_000, 0) + .saturating_add(Weight::from_parts(0, 3551)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorSalary::Status` (r:1 w:1) + /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorSalary::Claimant` (r:1 w:1) + /// Proof: `AmbassadorSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) + /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn payout() -> Weight { + // Proof Size summary in bytes: + // Measured: `879` + // Estimated: `4344` + // Minimum execution time: 68_000_000 picoseconds. 
+ Weight::from_parts(72_000_000, 0) + .saturating_add(Weight::from_parts(0, 4344)) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(7)) + } + /// Storage: `AmbassadorSalary::Status` (r:1 w:1) + /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorSalary::Claimant` (r:1 w:1) + /// Proof: `AmbassadorSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) + /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn payout_other() -> Weight { + // Proof Size summary in bytes: + // Measured: `879` + // Estimated: `4344` + // Minimum execution time: 69_000_000 picoseconds. + Weight::from_parts(70_000_000, 0) + .saturating_add(Weight::from_parts(0, 4344)) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(7)) + } + /// Storage: `AmbassadorSalary::Status` (r:1 w:1) + /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorSalary::Claimant` (r:1 w:1) + /// Proof: `AmbassadorSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn check_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `479` + // Estimated: `3944` + // Minimum execution time: 27_000_000 picoseconds. 
+ Weight::from_parts(28_000_000, 0) + .saturating_add(Weight::from_parts(0, 3944)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_salary_fellowship_salary.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_salary_fellowship_salary.rs new file mode 100644 index 00000000000..37680b4e5df --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_salary_fellowship_salary.rs @@ -0,0 +1,189 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_salary` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_salary +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_salary`. +pub struct WeightInfo(PhantomData); +impl pallet_salary::WeightInfo for WeightInfo { + /// Storage: `FellowshipSalary::Status` (r:1 w:1) + /// Proof: `FellowshipSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + fn init() -> Weight { + // Proof Size summary in bytes: + // Measured: `142` + // Estimated: `1541` + // Minimum execution time: 13_000_000 picoseconds. + Weight::from_parts(17_000_000, 0) + .saturating_add(Weight::from_parts(0, 1541)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipSalary::Status` (r:1 w:1) + /// Proof: `FellowshipSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + fn bump() -> Weight { + // Proof Size summary in bytes: + // Measured: `224` + // Estimated: `1541` + // Minimum execution time: 15_000_000 picoseconds. 
+ Weight::from_parts(18_000_000, 0) + .saturating_add(Weight::from_parts(0, 1541)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipSalary::Status` (r:1 w:0) + /// Proof: `FellowshipSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::Members` (r:1 w:0) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipSalary::Claimant` (r:1 w:1) + /// Proof: `FellowshipSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + fn induct() -> Weight { + // Proof Size summary in bytes: + // Measured: `395` + // Estimated: `3551` + // Minimum execution time: 22_000_000 picoseconds. + Weight::from_parts(25_000_000, 0) + .saturating_add(Weight::from_parts(0, 3551)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipCollective::Members` (r:1 w:0) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipSalary::Status` (r:1 w:1) + /// Proof: `FellowshipSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `FellowshipSalary::Claimant` (r:1 w:1) + /// Proof: `FellowshipSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + fn register() -> Weight { + // Proof Size summary in bytes: + // Measured: `462` + // Estimated: `3551` + // Minimum execution time: 26_000_000 picoseconds. + Weight::from_parts(29_000_000, 0) + .saturating_add(Weight::from_parts(0, 3551)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipSalary::Status` (r:1 w:1) + /// Proof: `FellowshipSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `FellowshipSalary::Claimant` (r:1 w:1) + /// Proof: `FellowshipSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::Members` (r:1 w:0) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) + /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` 
(`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn payout() -> Weight { + // Proof Size summary in bytes: + // Measured: `774` + // Estimated: `4239` + // Minimum execution time: 67_000_000 picoseconds. + Weight::from_parts(74_000_000, 0) + .saturating_add(Weight::from_parts(0, 4239)) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(7)) + } + /// Storage: `FellowshipSalary::Status` (r:1 w:1) + /// Proof: `FellowshipSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `FellowshipSalary::Claimant` (r:1 w:1) + /// Proof: `FellowshipSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::Members` (r:1 w:0) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) + /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn payout_other() -> Weight { + // Proof Size summary in bytes: + // Measured: `774` + // Estimated: `4239` + // Minimum execution time: 66_000_000 picoseconds. 
+ Weight::from_parts(71_000_000, 0) + .saturating_add(Weight::from_parts(0, 4239)) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(7)) + } + /// Storage: `FellowshipSalary::Status` (r:1 w:1) + /// Proof: `FellowshipSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `FellowshipSalary::Claimant` (r:1 w:1) + /// Proof: `FellowshipSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn check_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `512` + // Estimated: `3977` + // Minimum execution time: 26_000_000 picoseconds. + Weight::from_parts(27_000_000, 0) + .saturating_add(Weight::from_parts(0, 3977)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_scheduler.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_scheduler.rs new file mode 100644 index 00000000000..cf5610df665 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_scheduler.rs @@ -0,0 +1,206 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_scheduler` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_scheduler +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_scheduler`. 
+pub struct WeightInfo(PhantomData); +impl pallet_scheduler::WeightInfo for WeightInfo { + /// Storage: `Scheduler::IncompleteSince` (r:1 w:1) + /// Proof: `Scheduler::IncompleteSince` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn service_agendas_base() -> Weight { + // Proof Size summary in bytes: + // Measured: `31` + // Estimated: `1489` + // Minimum execution time: 3_441_000 picoseconds. + Weight::from_parts(3_604_000, 0) + .saturating_add(Weight::from_parts(0, 1489)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 200]`. + fn service_agenda_base(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `77 + s * (177 ±0)` + // Estimated: `159279` + // Minimum execution time: 2_879_000 picoseconds. + Weight::from_parts(2_963_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + // Standard Error: 3_764 + .saturating_add(Weight::from_parts(909_557, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn service_task_base() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 5_172_000 picoseconds. + Weight::from_parts(5_294_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Preimage::PreimageFor` (r:1 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`) + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// The range of component `s` is `[128, 4194304]`. + fn service_task_fetched(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `213 + s * (1 ±0)` + // Estimated: `3678 + s * (1 ±0)` + // Minimum execution time: 19_704_000 picoseconds. + Weight::from_parts(19_903_000, 0) + .saturating_add(Weight::from_parts(0, 3678)) + // Standard Error: 5 + .saturating_add(Weight::from_parts(1_394, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(s.into())) + } + /// Storage: `Scheduler::Lookup` (r:0 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + fn service_task_named() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_359_000 picoseconds. + Weight::from_parts(6_599_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn service_task_periodic() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 5_217_000 picoseconds. + Weight::from_parts(5_333_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn execute_dispatch_signed() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_406_000 picoseconds. 
+ Weight::from_parts(2_541_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn execute_dispatch_unsigned() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_370_000 picoseconds. + Weight::from_parts(2_561_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 199]`. + fn schedule(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `77 + s * (177 ±0)` + // Estimated: `159279` + // Minimum execution time: 11_784_000 picoseconds. + Weight::from_parts(5_574_404, 0) + .saturating_add(Weight::from_parts(0, 159279)) + // Standard Error: 7_217 + .saturating_add(Weight::from_parts(1_035_248, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Lookup` (r:0 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// The range of component `s` is `[1, 200]`. + fn cancel(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `77 + s * (177 ±0)` + // Estimated: `159279` + // Minimum execution time: 16_373_000 picoseconds. + Weight::from_parts(3_088_135, 0) + .saturating_add(Weight::from_parts(0, 159279)) + // Standard Error: 7_095 + .saturating_add(Weight::from_parts(1_745_270, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Scheduler::Lookup` (r:1 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 199]`. + fn schedule_named(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `468 + s * (179 ±0)` + // Estimated: `159279` + // Minimum execution time: 14_822_000 picoseconds. + Weight::from_parts(9_591_402, 0) + .saturating_add(Weight::from_parts(0, 159279)) + // Standard Error: 7_151 + .saturating_add(Weight::from_parts(1_058_408, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Scheduler::Lookup` (r:1 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// The range of component `s` is `[1, 200]`. + fn cancel_named(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `509 + s * (179 ±0)` + // Estimated: `159279` + // Minimum execution time: 18_541_000 picoseconds. 
+ Weight::from_parts(6_522_239, 0) + .saturating_add(Weight::from_parts(0, 159279)) + // Standard Error: 8_349 + .saturating_add(Weight::from_parts(1_760_431, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_session.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_session.rs new file mode 100644 index 00000000000..2ac0804df89 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_session.rs @@ -0,0 +1,80 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_session` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_session +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_session`. +pub struct WeightInfo(PhantomData); +impl pallet_session::WeightInfo for WeightInfo { + /// Storage: `Session::NextKeys` (r:1 w:1) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::KeyOwner` (r:1 w:1) + /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn set_keys() -> Weight { + // Proof Size summary in bytes: + // Measured: `270` + // Estimated: `3735` + // Minimum execution time: 16_663_000 picoseconds. 
+ Weight::from_parts(17_246_000, 0) + .saturating_add(Weight::from_parts(0, 3735)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Session::NextKeys` (r:1 w:1) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::KeyOwner` (r:0 w:1) + /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn purge_keys() -> Weight { + // Proof Size summary in bytes: + // Measured: `242` + // Estimated: `3707` + // Minimum execution time: 11_850_000 picoseconds. + Weight::from_parts(12_204_000, 0) + .saturating_add(Weight::from_parts(0, 3707)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_timestamp.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_timestamp.rs new file mode 100644 index 00000000000..ca06f43f92e --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_timestamp.rs @@ -0,0 +1,74 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_timestamp` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_timestamp +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_timestamp`. +pub struct WeightInfo(PhantomData); +impl pallet_timestamp::WeightInfo for WeightInfo { + /// Storage: `Timestamp::Now` (r:1 w:1) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Aura::CurrentSlot` (r:1 w:0) + /// Proof: `Aura::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + fn set() -> Weight { + // Proof Size summary in bytes: + // Measured: `49` + // Estimated: `1493` + // Minimum execution time: 7_863_000 picoseconds. 
+ Weight::from_parts(8_183_000, 0) + .saturating_add(Weight::from_parts(0, 1493)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn on_finalize() -> Weight { + // Proof Size summary in bytes: + // Measured: `57` + // Estimated: `0` + // Minimum execution time: 3_460_000 picoseconds. + Weight::from_parts(3_577_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_utility.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_utility.rs new file mode 100644 index 00000000000..c60a79d91da --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_utility.rs @@ -0,0 +1,101 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_utility` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_utility +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_utility`. +pub struct WeightInfo(PhantomData); +impl pallet_utility::WeightInfo for WeightInfo { + /// The range of component `c` is `[0, 1000]`. + fn batch(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_650_000 picoseconds. + Weight::from_parts(7_474_437, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 1_625 + .saturating_add(Weight::from_parts(4_996_146, 0).saturating_mul(c.into())) + } + fn as_derivative() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 4_612_000 picoseconds. + Weight::from_parts(4_774_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// The range of component `c` is `[0, 1000]`. + fn batch_all(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_744_000 picoseconds. 
+ Weight::from_parts(10_889_913, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 1_281 + .saturating_add(Weight::from_parts(5_218_293, 0).saturating_mul(c.into())) + } + fn dispatch_as() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_673_000 picoseconds. + Weight::from_parts(8_980_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// The range of component `c` is `[0, 1000]`. + fn force_batch(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_744_000 picoseconds. + Weight::from_parts(7_801_721, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 1_395 + .saturating_add(Weight::from_parts(5_000_971, 0).saturating_mul(c.into())) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs new file mode 100644 index 00000000000..a3b42cb86c4 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs @@ -0,0 +1,323 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_xcm` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=collectives-westend-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_xcm`. 
+pub struct WeightInfo(PhantomData); +impl pallet_xcm::WeightInfo for WeightInfo { + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn send() -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 25_746_000 picoseconds. + Weight::from_parts(26_349_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn teleport_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `69` + // Estimated: `1489` + // Minimum execution time: 22_660_000 picoseconds. + Weight::from_parts(23_173_000, 0) + .saturating_add(Weight::from_parts(0, 1489)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn reserve_transfer_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn execute() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn force_xcm_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_321_000 picoseconds. + Weight::from_parts(7_542_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:0 w:1) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn force_default_xcm_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_232_000 picoseconds. 
+ Weight::from_parts(2_395_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn force_subscribe_version_notify() -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 29_006_000 picoseconds. + Weight::from_parts(29_777_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn force_unsubscribe_version_notify() -> Weight { + // Proof Size summary in bytes: + // Measured: `363` + // Estimated: `3828` + // Minimum execution time: 31_245_000 picoseconds. 
+ Weight::from_parts(32_125_000, 0) + .saturating_add(Weight::from_parts(0, 3828)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) + /// Proof: `PolkadotXcm::XcmExecutionSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn force_suspension() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_255_000 picoseconds. + Weight::from_parts(2_399_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PolkadotXcm::SupportedVersion` (r:4 w:2) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn migrate_supported_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `162` + // Estimated: `11052` + // Minimum execution time: 16_521_000 picoseconds. + Weight::from_parts(17_001_000, 0) + .saturating_add(Weight::from_parts(0, 11052)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::VersionNotifiers` (r:4 w:2) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn migrate_version_notifiers() -> Weight { + // Proof Size summary in bytes: + // Measured: `166` + // Estimated: `11056` + // Minimum execution time: 16_486_000 picoseconds. + Weight::from_parts(16_729_000, 0) + .saturating_add(Weight::from_parts(0, 11056)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn already_notified_target() -> Weight { + // Proof Size summary in bytes: + // Measured: `173` + // Estimated: `13538` + // Minimum execution time: 18_037_000 picoseconds. + Weight::from_parts(18_310_000, 0) + .saturating_add(Weight::from_parts(0, 13538)) + .saturating_add(T::DbWeight::get().reads(5)) + } + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn notify_current_targets() -> Weight { + // Proof Size summary in bytes: + // Measured: `212` + // Estimated: `6152` + // Minimum execution time: 27_901_000 picoseconds. 
+ Weight::from_parts(28_566_000, 0) + .saturating_add(Weight::from_parts(0, 6152)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn notify_target_migration_fail() -> Weight { + // Proof Size summary in bytes: + // Measured: `206` + // Estimated: `8621` + // Minimum execution time: 9_299_000 picoseconds. + Weight::from_parts(9_547_000, 0) + .saturating_add(Weight::from_parts(0, 8621)) + .saturating_add(T::DbWeight::get().reads(3)) + } + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn migrate_version_notify_targets() -> Weight { + // Proof Size summary in bytes: + // Measured: `173` + // Estimated: `11063` + // Minimum execution time: 16_768_000 picoseconds. + Weight::from_parts(17_215_000, 0) + .saturating_add(Weight::from_parts(0, 11063)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn migrate_and_notify_old_targets() -> Weight { + // Proof Size summary in bytes: + // Measured: `215` + // Estimated: `11105` + // Minimum execution time: 35_134_000 picoseconds. + Weight::from_parts(35_883_000, 0) + .saturating_add(Weight::from_parts(0, 11105)) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `1588` + // Minimum execution time: 4_562_000 picoseconds. 
+ Weight::from_parts(4_802_000, 0) + .saturating_add(Weight::from_parts(0, 1588)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7740` + // Estimated: `11205` + // Minimum execution time: 26_865_000 picoseconds. + Weight::from_parts(27_400_000, 0) + .saturating_add(Weight::from_parts(0, 11205)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/paritydb_weights.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/paritydb_weights.rs new file mode 100644 index 00000000000..25679703831 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/paritydb_weights.rs @@ -0,0 +1,63 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod constants { + use frame_support::{ + parameter_types, + weights::{constants, RuntimeDbWeight}, + }; + + parameter_types! { + /// `ParityDB` can be enabled with a feature flag, but is still experimental. These weights + /// are available for brave runtime engineers who may want to try this out as default. + pub const ParityDbWeight: RuntimeDbWeight = RuntimeDbWeight { + read: 8_000 * constants::WEIGHT_REF_TIME_PER_NANOS, + write: 50_000 * constants::WEIGHT_REF_TIME_PER_NANOS, + }; + } + + #[cfg(test)] + mod test_db_weights { + use super::constants::ParityDbWeight as W; + use frame_support::weights::constants; + + /// Checks that all weights exist and have sane values. + // NOTE: If this test fails but you are sure that the generated values are fine, + // you can delete it. + #[test] + fn sane() { + // At least 1 µs. + assert!( + W::get().reads(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, + "Read weight should be at least 1 µs." + ); + assert!( + W::get().writes(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, + "Write weight should be at least 1 µs." + ); + // At most 1 ms. + assert!( + W::get().reads(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Read weight should be at most 1 ms." + ); + assert!( + W::get().writes(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Write weight should be at most 1 ms." + ); + } + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/rocksdb_weights.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/rocksdb_weights.rs new file mode 100644 index 00000000000..3dd817aa6f1 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/rocksdb_weights.rs @@ -0,0 +1,63 @@ +// This file is part of Substrate. 
+ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod constants { + use frame_support::{ + parameter_types, + weights::{constants, RuntimeDbWeight}, + }; + + parameter_types! { + /// By default, Substrate uses `RocksDB`, so this will be the weight used throughout + /// the runtime. + pub const RocksDbWeight: RuntimeDbWeight = RuntimeDbWeight { + read: 25_000 * constants::WEIGHT_REF_TIME_PER_NANOS, + write: 100_000 * constants::WEIGHT_REF_TIME_PER_NANOS, + }; + } + + #[cfg(test)] + mod test_db_weights { + use super::constants::RocksDbWeight as W; + use frame_support::weights::constants; + + /// Checks that all weights exist and have sane values. + // NOTE: If this test fails but you are sure that the generated values are fine, + // you can delete it. + #[test] + fn sane() { + // At least 1 µs. + assert!( + W::get().reads(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, + "Read weight should be at least 1 µs." + ); + assert!( + W::get().writes(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, + "Write weight should be at least 1 µs." + ); + // At most 1 ms. + assert!( + W::get().reads(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Read weight should be at most 1 ms." + ); + assert!( + W::get().writes(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Write weight should be at most 1 ms." + ); + } + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs new file mode 100644 index 00000000000..d58995827fa --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs @@ -0,0 +1,364 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
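The `paritydb_weights.rs` and `rocksdb_weights.rs` modules above only declare per-operation database costs; the autogenerated `WeightInfo` functions earlier in this patch charge those costs through `T::DbWeight::get().reads(..)` / `.writes(..)`. The following is a minimal sketch (not part of this patch) of that interaction. It uses `frame_support`'s stock `RocksDbWeight` constant, which carries the same 25_000 ns read / 100_000 ns write figures as the module above, and `example_extrinsic_weight` is a hypothetical name introduced only for illustration.

```rust
// Sketch: how a weight function combines a fixed execution cost with
// per-operation DB costs taken from a `RuntimeDbWeight` constant.
use frame_support::weights::{constants::RocksDbWeight, RuntimeDbWeight, Weight};

fn example_extrinsic_weight() -> Weight {
    // 25_000 ns per read and 100_000 ns per write, as declared by `RocksDbWeight`.
    let db: RuntimeDbWeight = RocksDbWeight::get();
    // Fixed ref-time and proof-size cost, plus two storage reads and one write.
    Weight::from_parts(10_000_000, 3_500)
        .saturating_add(db.reads(2))
        .saturating_add(db.writes(1))
}

fn main() {
    println!("{:?}", example_extrinsic_weight());
}
```

In a runtime the constant defined above would instead be wired in via `frame_system::Config::DbWeight`, which is what `T::DbWeight::get()` resolves to inside the generated weight functions.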
+ +use super::{ + AccountId, AllPalletsWithSystem, Balances, BaseDeliveryFee, FeeAssetId, Fellows, ParachainInfo, + ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, + TransactionByteFee, WeightToFee, WestendTreasuryAccount, XcmpQueue, +}; +use frame_support::{ + match_types, parameter_types, + traits::{ConstU32, Contains, Equals, Everything, Nothing}, + weights::Weight, +}; +use frame_system::EnsureRoot; +use pallet_xcm::XcmPassthrough; +use parachains_common::{ + impls::ToStakingPot, + xcm_config::{ConcreteAssetFromSystem, RelayOrOtherSystemParachains}, +}; +use polkadot_parachain_primitives::primitives::Sibling; +use polkadot_runtime_common::xcm_sender::ExponentialPrice; +use westend_runtime_constants::system_parachain; +use xcm::latest::prelude::*; +use xcm_builder::{ + AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, + AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, CurrencyAdapter, + DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, FixedWeightBounds, IsConcrete, + LocatableAssetId, OriginToPluralityVoice, ParentAsSuperuser, ParentIsPreset, + RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, + TrailingSetTopicAsId, UsingComponents, WithComputedOrigin, WithUniqueTopic, + XcmFeeManagerFromComponents, XcmFeeToAccount, +}; +use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; + +const FELLOWSHIP_ADMIN_INDEX: u32 = 1; + +parameter_types! { + pub const WndLocation: MultiLocation = MultiLocation::parent(); + pub const RelayNetwork: Option = Some(NetworkId::Westend); + pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); + pub UniversalLocation: InteriorMultiLocation = + X2(GlobalConsensus(RelayNetwork::get().unwrap()), Parachain(ParachainInfo::parachain_id().into())); + pub RelayTreasuryLocation: MultiLocation = (Parent, PalletInstance(westend_runtime_constants::TREASURY_PALLET_ID)).into(); + pub CheckingAccount: AccountId = PolkadotXcm::check_account(); + pub const GovernanceLocation: MultiLocation = MultiLocation::parent(); + pub const FellowshipAdminBodyId: BodyId = BodyId::Index(FELLOWSHIP_ADMIN_INDEX); + pub AssetHub: MultiLocation = (Parent, Parachain(1000)).into(); + pub AssetHubUsdtId: AssetId = (PalletInstance(50), GeneralIndex(1984)).into(); + pub UsdtAssetHub: LocatableAssetId = LocatableAssetId { + location: AssetHub::get(), + asset_id: AssetHubUsdtId::get(), + }; + pub WndAssetHub: LocatableAssetId = LocatableAssetId { + location: AssetHub::get(), + asset_id: WndLocation::get().into(), + }; +} + +/// Type for specifying how a `MultiLocation` can be converted into an `AccountId`. This is used +/// when determining ownership of accounts for asset transacting and when attempting to use XCM +/// `Transact` in order to determine the dispatch Origin. +pub type LocationToAccountId = ( + // The parent (Relay-chain) origin converts to the parent `AccountId`. + ParentIsPreset, + // Sibling parachain origins convert to AccountId via the `ParaId::into`. + SiblingParachainConvertsVia, + // Straight up local `AccountId32` origins just alias directly to `AccountId`. + AccountId32Aliases, +); + +/// Means for transacting the native currency on this chain. 
+pub type CurrencyTransactor = CurrencyAdapter< + // Use this currency: + Balances, + // Use this currency when it is a fungible asset matching the given location or name: + IsConcrete, + // Convert an XCM MultiLocation into a local account id: + LocationToAccountId, + // Our chain's account ID type (we can't get away without mentioning it explicitly): + AccountId, + // We don't track any teleports of `Balances`. + (), +>; + +/// This is the type we use to convert an (incoming) XCM origin into a local `Origin` instance, +/// ready for dispatching a transaction with Xcm's `Transact`. There is an `OriginKind` which can +/// biases the kind of local `Origin` it will become. +pub type XcmOriginToTransactDispatchOrigin = ( + // Sovereign account converter; this attempts to derive an `AccountId` from the origin location + // using `LocationToAccountId` and then turn that into the usual `Signed` origin. Useful for + // foreign chains who want to have a local sovereign account on this chain which they control. + SovereignSignedViaLocation, + // Native converter for Relay-chain (Parent) location; will convert to a `Relay` origin when + // recognised. + RelayChainAsNative, + // Native converter for sibling Parachains; will convert to a `SiblingPara` origin when + // recognised. + SiblingParachainAsNative, + // Superuser converter for the Relay-chain (Parent) location. This will allow it to issue a + // transaction from the Root origin. + ParentAsSuperuser, + // Native signed account converter; this just converts an `AccountId32` origin into a normal + // `RuntimeOrigin::Signed` origin of the same 32-byte value. + SignedAccountId32AsNative, + // Xcm origins can be represented natively under the Xcm pallet's Xcm origin. + XcmPassthrough, +); + +parameter_types! { + /// The amount of weight an XCM operation takes. This is a safe overestimate. + pub const BaseXcmWeight: Weight = Weight::from_parts(1_000_000_000, 1024); + /// A temporary weight value for each XCM instruction. + /// NOTE: This should be removed after we account for PoV weights. + pub const TempFixedXcmWeight: Weight = Weight::from_parts(1_000_000_000, 0); + pub const MaxInstructions: u32 = 100; + pub const MaxAssetsIntoHolding: u32 = 64; + // Fellows pluralistic body. + pub const FellowsBodyId: BodyId = BodyId::Technical; +} + +match_types! { + pub type ParentOrParentsPlurality: impl Contains = { + MultiLocation { parents: 1, interior: Here } | + MultiLocation { parents: 1, interior: X1(Plurality { .. }) } + }; + pub type ParentOrSiblings: impl Contains = { + MultiLocation { parents: 1, interior: Here } | + MultiLocation { parents: 1, interior: X1(_) } + }; +} + +/// A call filter for the XCM Transact instruction. This is a temporary measure until we properly +/// account for proof size weights. +/// +/// Calls that are allowed through this filter must: +/// 1. Have a fixed weight; +/// 2. Cannot lead to another call being made; +/// 3. Have a defined proof size weight, e.g. no unbounded vecs in call parameters. +pub struct SafeCallFilter; +impl Contains for SafeCallFilter { + fn contains(call: &RuntimeCall) -> bool { + #[cfg(feature = "runtime-benchmarks")] + { + if matches!(call, RuntimeCall::System(frame_system::Call::remark_with_event { .. })) { + return true + } + } + + matches!( + call, + RuntimeCall::System( + frame_system::Call::set_heap_pages { .. } | + frame_system::Call::set_code { .. } | + frame_system::Call::set_code_without_checks { .. } | + frame_system::Call::kill_prefix { .. }, + ) | RuntimeCall::ParachainSystem(..) 
| + RuntimeCall::Timestamp(..) | + RuntimeCall::Balances(..) | + RuntimeCall::CollatorSelection( + pallet_collator_selection::Call::set_desired_candidates { .. } | + pallet_collator_selection::Call::set_candidacy_bond { .. } | + pallet_collator_selection::Call::register_as_candidate { .. } | + pallet_collator_selection::Call::leave_intent { .. } | + pallet_collator_selection::Call::set_invulnerables { .. } | + pallet_collator_selection::Call::add_invulnerable { .. } | + pallet_collator_selection::Call::remove_invulnerable { .. }, + ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | + RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) | + RuntimeCall::XcmpQueue(..) | + RuntimeCall::MessageQueue(..) | + RuntimeCall::Alliance( + // `init_members` accepts unbounded vecs as arguments, + // but the call can be initiated only by root origin. + pallet_alliance::Call::init_members { .. } | + pallet_alliance::Call::vote { .. } | + pallet_alliance::Call::disband { .. } | + pallet_alliance::Call::set_rule { .. } | + pallet_alliance::Call::announce { .. } | + pallet_alliance::Call::remove_announcement { .. } | + pallet_alliance::Call::join_alliance { .. } | + pallet_alliance::Call::nominate_ally { .. } | + pallet_alliance::Call::elevate_ally { .. } | + pallet_alliance::Call::give_retirement_notice { .. } | + pallet_alliance::Call::retire { .. } | + pallet_alliance::Call::kick_member { .. } | + pallet_alliance::Call::close { .. } | + pallet_alliance::Call::abdicate_fellow_status { .. }, + ) | RuntimeCall::AllianceMotion( + pallet_collective::Call::vote { .. } | + pallet_collective::Call::disapprove_proposal { .. } | + pallet_collective::Call::close { .. }, + ) | RuntimeCall::FellowshipCollective( + pallet_ranked_collective::Call::add_member { .. } | + pallet_ranked_collective::Call::promote_member { .. } | + pallet_ranked_collective::Call::demote_member { .. } | + pallet_ranked_collective::Call::remove_member { .. }, + ) | RuntimeCall::FellowshipCore( + pallet_core_fellowship::Call::bump { .. } | + pallet_core_fellowship::Call::set_params { .. } | + pallet_core_fellowship::Call::set_active { .. } | + pallet_core_fellowship::Call::approve { .. } | + pallet_core_fellowship::Call::induct { .. } | + pallet_core_fellowship::Call::promote { .. } | + pallet_core_fellowship::Call::offboard { .. } | + pallet_core_fellowship::Call::submit_evidence { .. } | + pallet_core_fellowship::Call::import { .. }, + ) + ) + } +} + +pub type Barrier = TrailingSetTopicAsId< + DenyThenTry< + DenyReserveTransferToRelayChain, + ( + // Allow local users to buy weight credit. + TakeWeightCredit, + // Expected responses are OK. + AllowKnownQueryResponses, + // Allow XCMs with some computed origins to pass through. + WithComputedOrigin< + ( + // If the message is one that immediately attempts to pay for execution, then + // allow it. + AllowTopLevelPaidExecutionFrom, + // Parent and its pluralities (i.e. governance bodies) get free execution. + AllowExplicitUnpaidExecutionFrom, + // Subscriptions for version tracking are OK. + AllowSubscriptionsFrom, + ), + UniversalLocation, + ConstU32<8>, + >, + ), + >, +>; + +match_types! { + pub type SystemParachains: impl Contains = { + MultiLocation { + parents: 1, + interior: X1(Parachain( + system_parachain::ASSET_HUB_ID | + system_parachain::BRIDGE_HUB_ID | + system_parachain::COLLECTIVES_ID + )), + } + }; +} + +/// Locations that will not be charged fees in the executor, +/// either execution or delivery. 
+/// We only waive fees for system functions, which these locations represent. +pub type WaivedLocations = + (RelayOrOtherSystemParachains, Equals); + +/// Cases where a remote origin is accepted as trusted Teleporter for a given asset: +/// - DOT with the parent Relay Chain and sibling parachains. +pub type TrustedTeleporters = ConcreteAssetFromSystem; + +pub struct XcmConfig; +impl xcm_executor::Config for XcmConfig { + type RuntimeCall = RuntimeCall; + type XcmSender = XcmRouter; + type AssetTransactor = CurrencyTransactor; + type OriginConverter = XcmOriginToTransactDispatchOrigin; + // Collectives does not recognize a reserve location for any asset. Users must teleport WND + // where allowed (e.g. with the Relay Chain). + type IsReserve = (); + type IsTeleporter = TrustedTeleporters; + type UniversalLocation = UniversalLocation; + type Barrier = Barrier; + type Weigher = FixedWeightBounds; + type Trader = + UsingComponents>; + type ResponseHandler = PolkadotXcm; + type AssetTrap = PolkadotXcm; + type AssetClaims = PolkadotXcm; + type SubscriptionService = PolkadotXcm; + type PalletInstancesInfo = AllPalletsWithSystem; + type MaxAssetsIntoHolding = MaxAssetsIntoHolding; + type AssetLocker = (); + type AssetExchanger = (); + type FeeManager = XcmFeeManagerFromComponents< + WaivedLocations, + XcmFeeToAccount, + >; + type MessageExporter = (); + type UniversalAliases = Nothing; + type CallDispatcher = WithOriginFilter; + type SafeCallFilter = SafeCallFilter; + type Aliasers = Nothing; +} + +/// Converts a local signed origin into an XCM multilocation. +/// Forms the basis for local origins sending/executing XCMs. +pub type LocalOriginToLocation = SignedToAccountId32; + +pub type PriceForParentDelivery = + ExponentialPrice; + +/// The means for routing XCM messages which are not for local execution into the right message +/// queues. +pub type XcmRouter = WithUniqueTopic<( + // Two routers - use UMP to communicate with the relay chain: + cumulus_primitives_utility::ParentAsUmp, + // ..and XCMP to communicate with the sibling chains. + XcmpQueue, +)>; + +#[cfg(feature = "runtime-benchmarks")] +parameter_types! { + pub ReachableDest: Option = Some(Parent.into()); +} + +/// Type to convert the Fellows origin to a Plurality `MultiLocation` value. +pub type FellowsToPlurality = OriginToPluralityVoice; + +impl pallet_xcm::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + // We only allow the Fellows to send messages. + type SendXcmOrigin = EnsureXcmOrigin; + type XcmRouter = XcmRouter; + // We support local origins dispatching XCM executions in principle... + type ExecuteXcmOrigin = EnsureXcmOrigin; + // ... but disallow generic XCM execution. As a result only teleports are allowed. + type XcmExecuteFilter = Nothing; + type XcmExecutor = XcmExecutor; + type XcmTeleportFilter = Everything; + type XcmReserveTransferFilter = Nothing; // This parachain is not meant as a reserve location. 
+ type Weigher = FixedWeightBounds; + type UniversalLocation = UniversalLocation; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; + type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; + type Currency = Balances; + type CurrencyMatcher = (); + type TrustedLockers = (); + type SovereignAccountOf = LocationToAccountId; + type MaxLockers = ConstU32<8>; + type WeightInfo = crate::weights::pallet_xcm::WeightInfo; + type AdminOrigin = EnsureRoot; + type MaxRemoteLockConsumers = ConstU32<0>; + type RemoteLockConsumerIdentifier = (); +} + +impl cumulus_pallet_xcm::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type XcmExecutor = XcmExecutor; +} diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml new file mode 100644 index 00000000000..a30cdf35769 --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml @@ -0,0 +1,138 @@ +[package] +name = "glutton-westend-runtime" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Glutton parachain runtime." + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } + +# Substrate +frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} +frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} +frame-support = { path = "../../../../../substrate/frame/support", default-features = false} +frame-system = { path = "../../../../../substrate/frame/system", default-features = false} +frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} +frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} +frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} +pallet-glutton = { path = "../../../../../substrate/frame/glutton", default-features = false, optional = true} +pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false, optional = true} +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } +sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} +sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } +sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} +pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } +sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} 
+sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} +sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false} +sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} +sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} + +# Polkadot +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} + +# Cumulus +cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } +cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } +cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } +cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } +cumulus-primitives-timestamp = { path = "../../../../primitives/timestamp", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } +parachains-common = { path = "../../../common", default-features = false } + +[build-dependencies] +substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder" } + +[features] +default = [ "std" ] +runtime-benchmarks = [ + "cumulus-pallet-parachain-system/runtime-benchmarks", + "cumulus-primitives-core/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system-benchmarking/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-glutton/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", + "pallet-sudo/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "parachains-common/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "xcm-builder/runtime-benchmarks", + "xcm-executor/runtime-benchmarks", +] +std = [ + "codec/std", + "cumulus-pallet-aura-ext/std", + "cumulus-pallet-parachain-system/std", + "cumulus-pallet-xcm/std", + "cumulus-primitives-aura/std", + "cumulus-primitives-core/std", + "cumulus-primitives-timestamp/std", + "frame-benchmarking?/std", + "frame-executive/std", + "frame-support/std", + "frame-system-benchmarking?/std", + "frame-system-rpc-runtime-api/std", + "frame-system/std", + "frame-try-runtime?/std", + "pallet-aura/std", + "pallet-glutton/std", + "pallet-message-queue/std", + "pallet-sudo/std", + "pallet-timestamp/std", + "parachain-info/std", + "parachains-common/std", + "scale-info/std", + "sp-api/std", + "sp-block-builder/std", + "sp-consensus-aura/std", + "sp-core/std", + "sp-genesis-builder/std", + "sp-inherents/std", + "sp-offchain/std", + "sp-runtime/std", + "sp-session/std", + "sp-std/std", + "sp-storage/std", + "sp-transaction-pool/std", + "sp-version/std", + "xcm-builder/std", + "xcm-executor/std", + "xcm/std", +] +try-runtime = [ + "cumulus-pallet-aura-ext/try-runtime", + "cumulus-pallet-parachain-system/try-runtime", + 
"cumulus-pallet-xcm/try-runtime", + "frame-executive/try-runtime", + "frame-support/try-runtime", + "frame-system/try-runtime", + "frame-try-runtime/try-runtime", + "pallet-aura/try-runtime", + "pallet-glutton/try-runtime", + "pallet-message-queue/try-runtime", + "pallet-sudo/try-runtime", + "pallet-timestamp/try-runtime", + "parachain-info/try-runtime", + "sp-runtime/try-runtime", +] + +experimental = [ "pallet-aura/experimental" ] + +# A feature that should be enabled when the runtime should be built for on-chain +# deployment. This will disable stuff that shouldn't be part of the on-chain wasm +# to make it smaller like logging for example. +on-chain-release-build = [ "sp-api/disable-logging" ] diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/build.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/build.rs new file mode 100644 index 00000000000..1580e6f07be --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/build.rs @@ -0,0 +1,24 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use substrate_wasm_builder::WasmBuilder; + +fn main() { + WasmBuilder::new() + .with_current_project() + .export_heap_base() + .import_memory() + .build() +} diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs new file mode 100644 index 00000000000..60107281c22 --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -0,0 +1,532 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Glutton Westend Runtime +//! +//! The purpose of the Glutton parachain is to do stress testing on the Kusama +//! network. This runtime targets the Westend runtime to allow development +//! separate to the Kusama runtime. +//! +//! There may be multiple instances of the Glutton parachain deployed and +//! connected to its parent relay chain. +//! +//! These parachains are not holding any real value. Their purpose is to stress +//! test the network. +//! +//! ### Governance +//! +//! Glutton defers its governance (namely, its `Root` origin), to its Relay +//! Chain parent, Kusama (or Westend for development purposes). +//! +//! ### XCM +//! +//! Since the main goal of Glutton is solely stress testing, the parachain will +//! 
only be able receive XCM messages from the Relay Chain via DMP. This way the +//! Glutton parachains will be able to listen for upgrades that are coming from +//! the Relay chain. + +#![cfg_attr(not(feature = "std"), no_std)] +#![recursion_limit = "256"] + +// Make the WASM binary available. +#[cfg(feature = "std")] +include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); + +pub mod weights; +pub mod xcm_config; + +use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; +use sp_api::impl_runtime_apis; +pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; +use sp_runtime::{ + create_runtime_str, generic, impl_opaque_keys, + traits::{AccountIdLookup, BlakeTwo256, Block as BlockT}, + transaction_validity::{TransactionSource, TransactionValidity}, + ApplyExtrinsicResult, +}; +use sp_std::prelude::*; +#[cfg(feature = "std")] +use sp_version::NativeVersion; +use sp_version::RuntimeVersion; + +use cumulus_primitives_core::AggregateMessageOrigin; +pub use frame_support::{ + construct_runtime, + dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, + parameter_types, + traits::{ + ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything, IsInVec, Randomness, + }, + weights::{ + constants::{ + BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_REF_TIME_PER_SECOND, + }, + IdentityFee, Weight, + }, + PalletId, StorageValue, +}; +use frame_system::{ + limits::{BlockLength, BlockWeights}, + EnsureRoot, +}; +use parachains_common::{AccountId, Signature}; +#[cfg(any(feature = "std", test))] +pub use sp_runtime::BuildStorage; +pub use sp_runtime::{Perbill, Permill}; + +impl_opaque_keys! { + pub struct SessionKeys { + pub aura: Aura, + } +} + +#[sp_version::runtime_version] +pub const VERSION: RuntimeVersion = RuntimeVersion { + spec_name: create_runtime_str!("glutton-westend"), + impl_name: create_runtime_str!("glutton-westend"), + authoring_version: 1, + spec_version: 10000, + impl_version: 0, + apis: RUNTIME_API_VERSIONS, + transaction_version: 1, + state_version: 1, +}; + +/// The version information used to identify this runtime when compiled natively. +#[cfg(feature = "std")] +pub fn native_version() -> NativeVersion { + NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } +} + +/// We assume that ~10% of the block weight is consumed by `on_initialize` handlers. +/// This is used to limit the maximal weight of a single extrinsic. +const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); +/// We allow `Normal` extrinsics to fill up the block up to 75%, the rest can be used +/// by Operational extrinsics. +const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); +/// We allow for .5 seconds of compute with a 12 second average block time. +const MAXIMUM_BLOCK_WEIGHT: Weight = Weight::from_parts( + WEIGHT_REF_TIME_PER_SECOND.saturating_mul(2), + cumulus_primitives_core::relay_chain::MAX_POV_SIZE as u64, +); + +/// Maximum number of blocks simultaneously accepted by the Runtime, not yet included +/// into the relay chain. +const UNINCLUDED_SEGMENT_CAPACITY: u32 = 3; +/// How many parachain blocks are processed by the relay chain per parent. Limits the +/// number of blocks authored per slot. +const BLOCK_PROCESSING_VELOCITY: u32 = 2; +/// Relay chain slot duration, in milliseconds. +const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; + +/// This determines the average expected block time that we are targeting. 
+/// Blocks will be produced at a minimum duration defined by `SLOT_DURATION`. +/// `SLOT_DURATION` is picked up by `pallet_timestamp` which is in turn picked +/// up by `pallet_aura` to implement `fn slot_duration()`. +/// +/// Change this to adjust the block time. +pub const MILLISECS_PER_BLOCK: u64 = 6000; +pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; + +parameter_types! { + pub const BlockHashCount: BlockNumber = 4096; + pub const Version: RuntimeVersion = VERSION; + pub RuntimeBlockLength: BlockLength = + BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); + pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() + .base_block(BlockExecutionWeight::get()) + .for_class(DispatchClass::all(), |weights| { + weights.base_extrinsic = ExtrinsicBaseWeight::get(); + }) + .for_class(DispatchClass::Normal, |weights| { + weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); + }) + .for_class(DispatchClass::Operational, |weights| { + weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); + // Operational transactions have some extra reserved space, so that they + // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. + weights.reserved = Some( + MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT + ); + }) + .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) + .build_or_panic(); + pub const SS58Prefix: u8 = 42; +} + +impl frame_system::Config for Runtime { + type AccountId = AccountId; + type RuntimeCall = RuntimeCall; + type Lookup = AccountIdLookup; + type Nonce = Nonce; + type Hash = Hash; + type Hashing = BlakeTwo256; + type Block = Block; + type RuntimeEvent = RuntimeEvent; + type RuntimeOrigin = RuntimeOrigin; + type BlockHashCount = BlockHashCount; + type Version = Version; + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type DbWeight = (); + type BaseCallFilter = frame_support::traits::Everything; + type SystemWeightInfo = (); + type BlockWeights = RuntimeBlockWeights; + type BlockLength = RuntimeBlockLength; + type SS58Prefix = SS58Prefix; + type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +parameter_types! { + // We do anything the parent chain tells us in this runtime. + pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(2); + pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; +} + +type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< + Runtime, + RELAY_CHAIN_SLOT_DURATION_MILLIS, + BLOCK_PROCESSING_VELOCITY, + UNINCLUDED_SEGMENT_CAPACITY, +>; + +impl cumulus_pallet_parachain_system::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type OnSystemEvent = (); + type SelfParaId = parachain_info::Pallet; + type DmpQueue = frame_support::traits::EnqueueWithOrigin; + type OutboundXcmpMessageSource = (); + type ReservedDmpWeight = ReservedDmpWeight; + type XcmpMessageHandler = (); + type ReservedXcmpWeight = (); + type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; + type ConsensusHook = ConsensusHook; + type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; +} + +parameter_types! 
{ + pub MessageQueueServiceWeight: Weight = Perbill::from_percent(80) * + RuntimeBlockWeights::get().max_block; +} + +impl pallet_message_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = weights::pallet_message_queue::WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor< + cumulus_primitives_core::AggregateMessageOrigin, + >; + #[cfg(not(feature = "runtime-benchmarks"))] + type MessageProcessor = xcm_builder::ProcessXcmMessage< + AggregateMessageOrigin, + xcm_executor::XcmExecutor, + RuntimeCall, + >; + type Size = u32; + type QueueChangeHandler = (); + type QueuePausedQuery = (); // No XCMP queue pallet deployed. + type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type MaxStale = sp_core::ConstU32<8>; + type ServiceWeight = MessageQueueServiceWeight; +} + +impl parachain_info::Config for Runtime {} + +impl cumulus_pallet_aura_ext::Config for Runtime {} + +impl pallet_timestamp::Config for Runtime { + type Moment = u64; + type OnTimestampSet = Aura; + #[cfg(feature = "experimental")] + type MinimumPeriod = ConstU64<0>; + #[cfg(not(feature = "experimental"))] + type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>; + type WeightInfo = weights::pallet_timestamp::WeightInfo; +} + +impl pallet_aura::Config for Runtime { + type AuthorityId = AuraId; + type DisabledValidators = (); + type MaxAuthorities = ConstU32<100_000>; + type AllowMultipleBlocksPerSlot = ConstBool; + #[cfg(feature = "experimental")] + type SlotDuration = ConstU64; +} + +impl pallet_glutton::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = weights::pallet_glutton::WeightInfo; + type AdminOrigin = EnsureRoot; +} + +impl pallet_sudo::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type WeightInfo = (); +} + +construct_runtime! { + pub enum Runtime + { + System: frame_system::{Pallet, Call, Storage, Config, Event} = 0, + ParachainSystem: cumulus_pallet_parachain_system::{ + Pallet, Call, Config, Storage, Inherent, Event, ValidateUnsigned, + } = 1, + ParachainInfo: parachain_info::{Pallet, Storage, Config} = 2, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent} = 3, + + // DMP handler. + CumulusXcm: cumulus_pallet_xcm::{Pallet, Call, Storage, Event, Origin} = 10, + MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 11, + + // The main stage. + Glutton: pallet_glutton::{Pallet, Call, Storage, Event, Config} = 20, + + // Collator support + Aura: pallet_aura::{Pallet, Storage, Config} = 30, + AuraExt: cumulus_pallet_aura_ext::{Pallet, Storage, Config} = 31, + + // Sudo. + Sudo: pallet_sudo::{Pallet, Call, Storage, Event, Config} = 255, + } +} + +/// Index of a transaction in the chain. +pub type Nonce = u32; +/// A hash of some data used by the chain. +pub type Hash = sp_core::H256; +/// An index to a block. +pub type BlockNumber = u32; +/// The address format for describing accounts. +pub type Address = sp_runtime::MultiAddress; +/// Block header type as expected by this runtime. +pub type Header = generic::Header; +/// Block type as expected by this runtime. +pub type Block = generic::Block; +/// A Block signed with a Justification +pub type SignedBlock = generic::SignedBlock; +/// BlockId type as expected by this runtime. +pub type BlockId = generic::BlockId; +/// The SignedExtension to the basic transaction logic. 
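+///
+/// Note that `pallet_sudo::CheckOnlySudoAccount` comes first: signed extrinsics are only valid
+/// when they are signed by the sudo account. There is also no `ChargeTransactionPayment`
+/// extension, since this runtime has no balances with which to pay fees.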
+pub type SignedExtra = ( + pallet_sudo::CheckOnlySudoAccount, + frame_system::CheckNonZeroSender, + frame_system::CheckSpecVersion, + frame_system::CheckTxVersion, + frame_system::CheckGenesis, + frame_system::CheckEra, + frame_system::CheckNonce, +); +/// Unchecked extrinsic type as expected by this runtime. +pub type UncheckedExtrinsic = + generic::UncheckedExtrinsic; +/// Executive: handles dispatch to the various modules. +pub type Executive = frame_executive::Executive< + Runtime, + Block, + frame_system::ChainContext, + Runtime, + AllPalletsWithSystem, +>; + +#[cfg(feature = "runtime-benchmarks")] +#[macro_use] +extern crate frame_benchmarking; + +#[cfg(feature = "runtime-benchmarks")] +mod benches { + define_benchmarks!( + [cumulus_pallet_parachain_system, ParachainSystem] + [frame_system, SystemBench::] + [pallet_glutton, Glutton] + [pallet_message_queue, MessageQueue] + [pallet_timestamp, Timestamp] + ); +} + +impl_runtime_apis! { + impl sp_api::Core for Runtime { + fn version() -> RuntimeVersion { + VERSION + } + + fn execute_block(block: Block) { + Executive::execute_block(block) + } + + fn initialize_block(header: &::Header) { + Executive::initialize_block(header) + } + } + + impl sp_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + OpaqueMetadata::new(Runtime::metadata().into()) + } + + fn metadata_at_version(version: u32) -> Option { + Runtime::metadata_at_version(version) + } + + fn metadata_versions() -> sp_std::vec::Vec { + Runtime::metadata_versions() + } + } + + impl sp_consensus_aura::AuraApi for Runtime { + fn slot_duration() -> sp_consensus_aura::SlotDuration { + sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) + } + + fn authorities() -> Vec { + Aura::authorities().into_inner() + } + } + + impl cumulus_primitives_aura::AuraUnincludedSegmentApi for Runtime { + fn can_build_upon( + included_hash: ::Hash, + slot: cumulus_primitives_aura::Slot, + ) -> bool { + ConsensusHook::can_build_upon(included_hash, slot) + } + } + + impl sp_block_builder::BlockBuilder for Runtime { + fn apply_extrinsic( + extrinsic: ::Extrinsic, + ) -> ApplyExtrinsicResult { + Executive::apply_extrinsic(extrinsic) + } + + fn finalize_block() -> ::Header { + Executive::finalize_block() + } + + fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { + data.create_extrinsics() + } + + fn check_inherents(block: Block, data: sp_inherents::InherentData) -> sp_inherents::CheckInherentsResult { + data.check_extrinsics(&block) + } + } + + impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { + fn validate_transaction( + source: TransactionSource, + tx: ::Extrinsic, + block_hash: ::Hash, + ) -> TransactionValidity { + Executive::validate_transaction(source, tx, block_hash) + } + } + + impl sp_offchain::OffchainWorkerApi for Runtime { + fn offchain_worker(header: &::Header) { + Executive::offchain_worker(header) + } + } + + impl sp_session::SessionKeys for Runtime { + fn generate_session_keys(seed: Option>) -> Vec { + SessionKeys::generate(seed) + } + + fn decode_session_keys( + encoded: Vec, + ) -> Option, KeyTypeId)>> { + SessionKeys::decode_into_raw_public_keys(&encoded) + } + } + + impl cumulus_primitives_core::CollectCollationInfo for Runtime { + fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { + ParachainSystem::collect_collation_info(header) + } + } + + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { + fn account_nonce(account: AccountId) -> Nonce { + 
System::account_nonce(account) + } + } + + #[cfg(feature = "runtime-benchmarks")] + impl frame_benchmarking::Benchmark for Runtime { + fn benchmark_metadata(extra: bool) -> ( + Vec, + Vec, + ) { + use frame_benchmarking::{Benchmarking, BenchmarkList}; + use frame_support::traits::StorageInfoTrait; + use frame_system_benchmarking::Pallet as SystemBench; + + let mut list = Vec::::new(); + list_benchmarks!(list, extra); + + let storage_info = AllPalletsWithSystem::storage_info(); + + (list, storage_info) + } + + fn dispatch_benchmark( + config: frame_benchmarking::BenchmarkConfig + ) -> Result, sp_runtime::RuntimeString> { + use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; + use sp_storage::TrackedStorageKey; + + use frame_system_benchmarking::Pallet as SystemBench; + impl frame_system_benchmarking::Config for Runtime { + fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { + ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); + Ok(()) + } + + fn verify_set_code() { + System::assert_last_event(cumulus_pallet_parachain_system::Event::::ValidationFunctionStored.into()); + } + } + + use frame_support::traits::WhitelistedStorageKeys; + let whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); + + let mut batches = Vec::::new(); + let params = (&config, &whitelist); + add_benchmarks!(params, batches); + Ok(batches) + } + } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + create_default_config::() + } + + fn build_config(config: Vec) -> sp_genesis_builder::Result { + build_config::(config) + } + } +} + +cumulus_pallet_parachain_system::register_validate_block! { + Runtime = Runtime, + BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, +} diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/cumulus_pallet_parachain_system.rs new file mode 100644 index 00000000000..bc8299ab1bd --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/cumulus_pallet_parachain_system.rs @@ -0,0 +1,75 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `cumulus_pallet_parachain_system` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("glutton-westend-dev-1300")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=cumulus_pallet_parachain_system +// --chain=glutton-westend-dev-1300 +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_parachain_system`. +pub struct WeightInfo(PhantomData); +impl cumulus_pallet_parachain_system::WeightInfo for WeightInfo { + /// Storage: `ParachainSystem::LastDmqMqcHead` (r:1 w:1) + /// Proof: `ParachainSystem::LastDmqMqcHead` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `ParachainSystem::ProcessedDownwardMessages` (r:0 w:1) + /// Proof: `ParachainSystem::ProcessedDownwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::Pages` (r:0 w:1000) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 1000]`. + fn enqueue_inbound_downward_messages(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `12` + // Estimated: `3517` + // Minimum execution time: 1_745_000 picoseconds. + Weight::from_parts(1_859_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + // Standard Error: 53_384 + .saturating_add(Weight::from_parts(196_309_089, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + } +} diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system.rs new file mode 100644 index 00000000000..6f8cf4f39df --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system.rs @@ -0,0 +1,153 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system` +//! +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("glutton-westend-dev-1300")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=frame_system +// --chain=glutton-westend-dev-1300 +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system`. +pub struct WeightInfo(PhantomData); +impl frame_system::WeightInfo for WeightInfo { + /// The range of component `b` is `[0, 3932160]`. + fn remark(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_570_000 picoseconds. + Weight::from_parts(1_626_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 0 + .saturating_add(Weight::from_parts(387, 0).saturating_mul(b.into())) + } + /// The range of component `b` is `[0, 3932160]`. + fn remark_with_event(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 4_200_000 picoseconds. + Weight::from_parts(4_262_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 4 + .saturating_add(Weight::from_parts(1_791, 0).saturating_mul(b.into())) + } + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) + fn set_heap_pages() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `1485` + // Minimum execution time: 2_680_000 picoseconds. 
+ Weight::from_parts(2_936_000, 0) + .saturating_add(Weight::from_parts(0, 1485)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) + /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpgradeRestrictionSignal` (r:1 w:0) + /// Proof: `ParachainSystem::UpgradeRestrictionSignal` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingValidationCode` (r:1 w:1) + /// Proof: `ParachainSystem::PendingValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::NewValidationCode` (r:0 w:1) + /// Proof: `ParachainSystem::NewValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::DidSetValidationCode` (r:0 w:1) + /// Proof: `ParachainSystem::DidSetValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_code() -> Weight { + // Proof Size summary in bytes: + // Measured: `127` + // Estimated: `1612` + // Minimum execution time: 119_097_302_000 picoseconds. + Weight::from_parts(120_914_576_000, 0) + .saturating_add(Weight::from_parts(0, 1612)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `i` is `[0, 1000]`. + fn set_storage(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_606_000 picoseconds. + Weight::from_parts(1_704_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 2_090 + .saturating_add(Weight::from_parts(765_829, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `i` is `[0, 1000]`. + fn kill_storage(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_646_000 picoseconds. + Weight::from_parts(1_719_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 1_067 + .saturating_add(Weight::from_parts(578_598, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `p` is `[0, 1000]`. + fn kill_prefix(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `58 + p * (69 ±0)` + // Estimated: `53 + p * (70 ±0)` + // Minimum execution time: 2_933_000 picoseconds. 
+ Weight::from_parts(3_069_000, 0) + .saturating_add(Weight::from_parts(0, 53)) + // Standard Error: 1_844 + .saturating_add(Weight::from_parts(1_214_377, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) + .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) + } +} diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/mod.rs new file mode 100644 index 00000000000..47f9d1ee105 --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/mod.rs @@ -0,0 +1,19 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod cumulus_pallet_parachain_system; +pub mod pallet_glutton; +pub mod pallet_message_queue; +pub mod pallet_timestamp; diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_glutton.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_glutton.rs new file mode 100644 index 00000000000..9345458a704 --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_glutton.rs @@ -0,0 +1,178 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_glutton` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("glutton-westend-dev-1300")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_glutton +// --chain=glutton-westend-dev-1300 +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_glutton`. +pub struct WeightInfo(PhantomData); +impl pallet_glutton::WeightInfo for WeightInfo { + /// Storage: `Glutton::TrashDataCount` (r:1 w:1) + /// Proof: `Glutton::TrashDataCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Glutton::TrashData` (r:0 w:1000) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 1000]`. + fn initialize_pallet_grow(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `87` + // Estimated: `1489` + // Minimum execution time: 6_453_000 picoseconds. + Weight::from_parts(6_629_000, 0) + .saturating_add(Weight::from_parts(0, 1489)) + // Standard Error: 3_416 + .saturating_add(Weight::from_parts(9_938_610, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + } + /// Storage: `Glutton::TrashDataCount` (r:1 w:1) + /// Proof: `Glutton::TrashDataCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Glutton::TrashData` (r:0 w:1000) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 1000]`. + fn initialize_pallet_shrink(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `120` + // Estimated: `1489` + // Minimum execution time: 6_456_000 picoseconds. + Weight::from_parts(6_564_000, 0) + .saturating_add(Weight::from_parts(0, 1489)) + // Standard Error: 1_336 + .saturating_add(Weight::from_parts(1_141_705, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + } + /// The range of component `i` is `[0, 100000]`. + fn waste_ref_time_iter(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 679_000 picoseconds. + Weight::from_parts(3_310_101, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 10 + .saturating_add(Weight::from_parts(103_703, 0).saturating_mul(i.into())) + } + /// Storage: `Glutton::TrashData` (r:5000 w:0) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) + /// The range of component `i` is `[0, 5000]`. 
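+	/// Each `TrashData` read accounted for below adds roughly 3 KiB to the proof size, so `i`
+	/// controls how much PoV this call wastes.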
+ fn waste_proof_size_some(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `119115 + i * (1022 ±0)` + // Estimated: `990 + i * (3016 ±0)` + // Minimum execution time: 765_000 picoseconds. + Weight::from_parts(1_004_000, 0) + .saturating_add(Weight::from_parts(0, 990)) + // Standard Error: 4_008 + .saturating_add(Weight::from_parts(6_130_770, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) + .saturating_add(Weight::from_parts(0, 3016).saturating_mul(i.into())) + } + /// Storage: `Glutton::Storage` (r:1 w:0) + /// Proof: `Glutton::Storage` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::Compute` (r:1 w:0) + /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::TrashData` (r:1737 w:0) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) + fn on_idle_high_proof_waste() -> Weight { + // Proof Size summary in bytes: + // Measured: `1900498` + // Estimated: `5239782` + // Minimum execution time: 97_248_614_000 picoseconds. + Weight::from_parts(97_728_420_000, 0) + .saturating_add(Weight::from_parts(0, 5239782)) + .saturating_add(T::DbWeight::get().reads(1739)) + } + /// Storage: `Glutton::Storage` (r:1 w:0) + /// Proof: `Glutton::Storage` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::Compute` (r:1 w:0) + /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::TrashData` (r:5 w:0) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) + fn on_idle_low_proof_waste() -> Weight { + // Proof Size summary in bytes: + // Measured: `9548` + // Estimated: `16070` + // Minimum execution time: 97_305_112_000 picoseconds. + Weight::from_parts(97_427_728_000, 0) + .saturating_add(Weight::from_parts(0, 16070)) + .saturating_add(T::DbWeight::get().reads(7)) + } + /// Storage: `Glutton::Storage` (r:1 w:0) + /// Proof: `Glutton::Storage` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::Compute` (r:1 w:0) + /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + fn empty_on_idle() -> Weight { + // Proof Size summary in bytes: + // Measured: `87` + // Estimated: `1493` + // Minimum execution time: 4_125_000 picoseconds. + Weight::from_parts(4_339_000, 0) + .saturating_add(Weight::from_parts(0, 1493)) + .saturating_add(T::DbWeight::get().reads(2)) + } + /// Storage: `Glutton::Compute` (r:0 w:1) + /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + fn set_compute() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_879_000 picoseconds. + Weight::from_parts(4_211_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Glutton::Storage` (r:0 w:1) + /// Proof: `Glutton::Storage` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + fn set_storage() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_920_000 picoseconds. 
+ Weight::from_parts(4_081_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_message_queue.rs new file mode 100644 index 00000000000..eab6c15a40d --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_message_queue.rs @@ -0,0 +1,179 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_message_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("glutton-westend-dev-1300")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_message_queue +// --chain=glutton-westend-dev-1300 +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_message_queue`. +pub struct WeightInfo(PhantomData); +impl pallet_message_queue::WeightInfo for WeightInfo { + /// Storage: `MessageQueue::ServiceHead` (r:1 w:0) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn ready_ring_knit() -> Weight { + // Proof Size summary in bytes: + // Measured: `223` + // Estimated: `6044` + // Minimum execution time: 10_833_000 picoseconds. 
+ Weight::from_parts(11_237_000, 0) + .saturating_add(Weight::from_parts(0, 6044)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + fn ready_ring_unknit() -> Weight { + // Proof Size summary in bytes: + // Measured: `218` + // Estimated: `6044` + // Minimum execution time: 9_399_000 picoseconds. + Weight::from_parts(9_773_000, 0) + .saturating_add(Weight::from_parts(0, 6044)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn service_queue_base() -> Weight { + // Proof Size summary in bytes: + // Measured: `6` + // Estimated: `3517` + // Minimum execution time: 3_277_000 picoseconds. + Weight::from_parts(3_358_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn service_page_base_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `72` + // Estimated: `69050` + // Minimum execution time: 5_429_000 picoseconds. + Weight::from_parts(5_667_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn service_page_base_no_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `72` + // Estimated: `69050` + // Minimum execution time: 5_538_000 picoseconds. + Weight::from_parts(5_803_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn service_page_item() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 89_888_000 picoseconds. + Weight::from_parts(90_929_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn bump_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `171` + // Estimated: `3517` + // Minimum execution time: 6_129_000 picoseconds. 
+ Weight::from_parts(6_414_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn reap_page() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `69050` + // Minimum execution time: 52_366_000 picoseconds. + Weight::from_parts(53_500_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn execute_overweight_page_removed() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `69050` + // Minimum execution time: 67_848_000 picoseconds. + Weight::from_parts(68_910_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn execute_overweight_page_updated() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `69050` + // Minimum execution time: 107_564_000 picoseconds. + Weight::from_parts(109_377_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_timestamp.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_timestamp.rs new file mode 100644 index 00000000000..4218dcc73f4 --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_timestamp.rs @@ -0,0 +1,73 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_timestamp` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! 
DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("glutton-westend-dev-1300")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_timestamp +// --chain=glutton-westend-dev-1300 +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_timestamp`. +pub struct WeightInfo(PhantomData); +impl pallet_timestamp::WeightInfo for WeightInfo { + /// Storage: `Timestamp::Now` (r:1 w:1) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Aura::CurrentSlot` (r:1 w:0) + /// Proof: `Aura::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + fn set() -> Weight { + // Proof Size summary in bytes: + // Measured: `86` + // Estimated: `1493` + // Minimum execution time: 6_306_000 picoseconds. + Weight::from_parts(6_592_000, 0) + .saturating_add(Weight::from_parts(0, 1493)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn on_finalize() -> Weight { + // Proof Size summary in bytes: + // Measured: `57` + // Estimated: `0` + // Minimum execution time: 2_900_000 picoseconds. + Weight::from_parts(3_030_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } +} diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs new file mode 100644 index 00000000000..5ebb0ade123 --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs @@ -0,0 +1,92 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::{ + AccountId, AllPalletsWithSystem, ParachainInfo, Runtime, RuntimeCall, RuntimeEvent, + RuntimeOrigin, +}; +use frame_support::{ + match_types, parameter_types, + traits::{Everything, Nothing}, + weights::Weight, +}; +use xcm::latest::prelude::*; +use xcm_builder::{ + AllowExplicitUnpaidExecutionFrom, FixedWeightBounds, ParentAsSuperuser, ParentIsPreset, + SovereignSignedViaLocation, +}; + +parameter_types! 
{ + pub const WestendLocation: MultiLocation = MultiLocation::parent(); + pub const WestendNetwork: Option = Some(NetworkId::Westend); + pub UniversalLocation: InteriorMultiLocation = X1(Parachain(ParachainInfo::parachain_id().into())); +} + +/// This is the type we use to convert an (incoming) XCM origin into a local `Origin` instance, +/// ready for dispatching a transaction with Xcm's `Transact`. There is an `OriginKind` which can +/// bias the kind of local `Origin` it will become. +pub type XcmOriginToTransactDispatchOrigin = ( + // Sovereign account converter; this attempts to derive an `AccountId` from the origin location + // using `LocationToAccountId` and then turn that into the usual `Signed` origin. Useful for + // foreign chains who want to have a local sovereign account on this chain which they control. + SovereignSignedViaLocation, RuntimeOrigin>, + // Superuser converter for the Relay-chain (Parent) location. This will allow it to issue a + // transaction from the Root origin. + ParentAsSuperuser, +); + +match_types! { + pub type JustTheParent: impl Contains = { MultiLocation { parents:1, interior: Here } }; +} + +parameter_types! { + // One XCM operation is 1_000_000_000 weight - almost certainly a conservative estimate. + pub UnitWeightCost: Weight = Weight::from_parts(1_000_000_000, 64 * 1024); + pub const MaxInstructions: u32 = 100; + pub const MaxAssetsIntoHolding: u32 = 64; +} + +pub struct XcmConfig; +impl xcm_executor::Config for XcmConfig { + type RuntimeCall = RuntimeCall; + type XcmSender = (); // sending XCM not supported + type AssetTransactor = (); // balances not supported + type OriginConverter = XcmOriginToTransactDispatchOrigin; + type IsReserve = (); // balances not supported + type IsTeleporter = (); // balances not supported + type UniversalLocation = UniversalLocation; + type Barrier = AllowExplicitUnpaidExecutionFrom; + type Weigher = FixedWeightBounds; // balances not supported + type Trader = (); // balances not supported + type ResponseHandler = (); // Don't handle responses for now. 
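+	// The remaining items are disabled as well: no asset trapping or claims and no version
+	// subscriptions, since this chain neither holds assets nor sends XCM of its own.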
+ type AssetTrap = (); // don't trap for now + type AssetClaims = (); // don't claim for now + type SubscriptionService = (); // don't handle subscriptions for now + type PalletInstancesInfo = AllPalletsWithSystem; + type MaxAssetsIntoHolding = MaxAssetsIntoHolding; + type AssetLocker = (); + type AssetExchanger = (); + type FeeManager = (); + type MessageExporter = (); + type UniversalAliases = Nothing; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; + type Aliasers = Nothing; +} + +impl cumulus_pallet_xcm::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type XcmExecutor = xcm_executor::XcmExecutor; +} diff --git a/cumulus/parachains/testnets-common/Cargo.toml b/cumulus/parachains/testnets-common/Cargo.toml new file mode 100644 index 00000000000..e39cf91d3ab --- /dev/null +++ b/cumulus/parachains/testnets-common/Cargo.toml @@ -0,0 +1,44 @@ +[package] +name = "testnets-common" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +description = "Logic and configuration specific to testnet parachain runtimes" +license = "Apache-2.0" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +smallvec = "1.11.0" + +# Substrate +frame-support = { path = "../../../substrate/frame/support", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } + +# Polkadot +rococo-runtime-constants = { path = "../../../polkadot/runtime/rococo/constants", default-features = false} +westend-runtime-constants = { path = "../../../polkadot/runtime/westend/constants", default-features = false} +polkadot-core-primitives = { path = "../../../polkadot/core-primitives", default-features = false} + +# Cumulus + +[dev-dependencies] + +[build-dependencies] +substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder" } + +[features] +default = [ "std" ] +std = [ + "frame-support/std", + "polkadot-core-primitives/std", + "rococo-runtime-constants/std", + "sp-runtime/std", + "westend-runtime-constants/std", +] + +runtime-benchmarks = [ + "frame-support/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] diff --git a/cumulus/parachains/testnets-common/src/lib.rs b/cumulus/parachains/testnets-common/src/lib.rs new file mode 100644 index 00000000000..42d367bff27 --- /dev/null +++ b/cumulus/parachains/testnets-common/src/lib.rs @@ -0,0 +1,30 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg_attr(not(feature = "std"), no_std)] + +/// Since the parachains-common package is now published to crates.io, SP runtimes for testnets +/// will be adapted to use this package, and their config removed from the published common +/// package. Only the configs specific to rococo, westend and wococo will be moved here, and the +/// truly common logic will still be sourced from the parachains-common package. +/// +/// In practice this just means that instead of using e.g. 
`[parachains_common::westend::*]`, now +/// the westend configs will be in `[testnets_common::westend::*]`. +/// +/// TODO: edit all runtimes to remove the testnet configs as part of PR #1737 +/// +pub mod rococo; +pub mod westend; +pub mod wococo; diff --git a/cumulus/parachains/testnets-common/src/rococo.rs b/cumulus/parachains/testnets-common/src/rococo.rs new file mode 100644 index 00000000000..6e31def4b55 --- /dev/null +++ b/cumulus/parachains/testnets-common/src/rococo.rs @@ -0,0 +1,119 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod currency { + use polkadot_core_primitives::Balance; + use rococo_runtime_constants as constants; + + /// The existential deposit. Set to 1/10 of its parent Relay Chain (v9010). + pub const EXISTENTIAL_DEPOSIT: Balance = constants::currency::EXISTENTIAL_DEPOSIT / 10; + + pub const UNITS: Balance = constants::currency::UNITS; + pub const CENTS: Balance = constants::currency::CENTS; + pub const MILLICENTS: Balance = constants::currency::MILLICENTS; + + pub const fn deposit(items: u32, bytes: u32) -> Balance { + // map to 1/100 of what the rococo relay chain charges + constants::currency::deposit(items, bytes) / 100 + } +} + +pub mod fee { + use frame_support::{ + pallet_prelude::Weight, + weights::{ + constants::ExtrinsicBaseWeight, FeePolynomial, WeightToFeeCoefficient, + WeightToFeeCoefficients, WeightToFeePolynomial, + }, + }; + use polkadot_core_primitives::Balance; + use smallvec::smallvec; + pub use sp_runtime::Perbill; + + /// The block saturation level. Fees will be updates based on this value. + pub const TARGET_BLOCK_FULLNESS: Perbill = Perbill::from_percent(25); + + /// Handles converting a weight scalar to a fee value, based on the scale and granularity of the + /// node's balance type. + /// + /// This should typically create a mapping between the following ranges: + /// - `[0, MAXIMUM_BLOCK_WEIGHT]` + /// - `[Balance::min, Balance::max]` + /// + /// Yet, it can be used for any other sort of change to weight-fee. Some examples being: + /// - Setting it to `0` will essentially disable the weight fee. + /// - Setting it to `1` will cause the literal `#[weight = x]` values to be charged. + pub struct WeightToFee; + impl frame_support::weights::WeightToFee for WeightToFee { + type Balance = Balance; + + fn weight_to_fee(weight: &Weight) -> Self::Balance { + let time_poly: FeePolynomial = RefTimeToFee::polynomial().into(); + let proof_poly: FeePolynomial = ProofSizeToFee::polynomial().into(); + + // Take the maximum instead of the sum to charge by the more scarce resource. + time_poly.eval(weight.ref_time()).max(proof_poly.eval(weight.proof_size())) + } + } + + /// Maps the reference time component of `Weight` to a fee. 
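+	///
+	/// The mapping is a degree-one polynomial, i.e. `fee = ref_time * p / q`, with `p / q` split
+	/// into an integer coefficient and a `Perbill` fractional coefficient as required by
+	/// `WeightToFeeCoefficient`.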
+ pub struct RefTimeToFee; + impl WeightToFeePolynomial for RefTimeToFee { + type Balance = Balance; + fn polynomial() -> WeightToFeeCoefficients { + // In Rococo, extrinsic base weight (smallest non-zero weight) is mapped to 1/10 CENT: + // The standard system parachain configuration is 1/10 of that, as in 1/100 CENT. + let p = super::currency::CENTS; + let q = 100 * Balance::from(ExtrinsicBaseWeight::get().ref_time()); + + smallvec![WeightToFeeCoefficient { + degree: 1, + negative: false, + coeff_frac: Perbill::from_rational(p % q, q), + coeff_integer: p / q, + }] + } + } + + /// Maps the proof size component of `Weight` to a fee. + pub struct ProofSizeToFee; + impl WeightToFeePolynomial for ProofSizeToFee { + type Balance = Balance; + fn polynomial() -> WeightToFeeCoefficients { + // Map 10kb proof to 1 CENT. + let p = super::currency::CENTS; + let q = 10_000; + + smallvec![WeightToFeeCoefficient { + degree: 1, + negative: false, + coeff_frac: Perbill::from_rational(p % q, q), + coeff_integer: p / q, + }] + } + } +} + +/// Consensus-related. +pub mod consensus { + /// Maximum number of blocks simultaneously accepted by the Runtime, not yet included + /// into the relay chain. + pub const UNINCLUDED_SEGMENT_CAPACITY: u32 = 1; + /// How many parachain blocks are processed by the relay chain per parent. Limits the + /// number of blocks authored per slot. + pub const BLOCK_PROCESSING_VELOCITY: u32 = 1; + /// Relay chain slot duration, in milliseconds. + pub const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; +} diff --git a/cumulus/parachains/testnets-common/src/westend.rs b/cumulus/parachains/testnets-common/src/westend.rs new file mode 100644 index 00000000000..0ae21e23454 --- /dev/null +++ b/cumulus/parachains/testnets-common/src/westend.rs @@ -0,0 +1,140 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/// Universally recognized accounts. +pub mod account { + use frame_support::PalletId; + + /// Westend treasury pallet id, used to convert into AccountId - in Westend as a destination for + /// slashed funds. + pub const WESTEND_TREASURY_PALLET_ID: PalletId = PalletId(*b"py/trsry"); + /// Alliance pallet ID - used as a temporary place to deposit a slashed imbalance before the + /// teleport to the Treasury. + pub const ALLIANCE_PALLET_ID: PalletId = PalletId(*b"py/allia"); + /// Referenda pallet ID - used as a temporary place to deposit a slashed imbalance before the + /// teleport to the Treasury. + pub const REFERENDA_PALLET_ID: PalletId = PalletId(*b"py/refer"); + /// Ambassador Referenda pallet ID - used as a temporary place to deposit a slashed imbalance + /// before the teleport to the Treasury. + pub const AMBASSADOR_REFERENDA_PALLET_ID: PalletId = PalletId(*b"py/amref"); +} + +pub mod currency { + use polkadot_core_primitives::Balance; + use westend_runtime_constants as constants; + + /// The existential deposit. Set to 1/10 of its parent Relay Chain. 
+ pub const EXISTENTIAL_DEPOSIT: Balance = constants::currency::EXISTENTIAL_DEPOSIT / 10; + + pub const UNITS: Balance = constants::currency::UNITS; + pub const DOLLARS: Balance = UNITS; // 1_000_000_000_000 + pub const CENTS: Balance = constants::currency::CENTS; + pub const MILLICENTS: Balance = constants::currency::MILLICENTS; + pub const GRAND: Balance = constants::currency::GRAND; + + pub const fn deposit(items: u32, bytes: u32) -> Balance { + // 1/100 of Westend testnet + constants::currency::deposit(items, bytes) / 100 + } +} + +/// Fee-related. +pub mod fee { + use frame_support::{ + pallet_prelude::Weight, + weights::{ + constants::ExtrinsicBaseWeight, FeePolynomial, WeightToFeeCoefficient, + WeightToFeeCoefficients, WeightToFeePolynomial, + }, + }; + use polkadot_core_primitives::Balance; + use smallvec::smallvec; + pub use sp_runtime::Perbill; + + /// The block saturation level. Fees will be updated based on this value. + pub const TARGET_BLOCK_FULLNESS: Perbill = Perbill::from_percent(25); + + /// Handles converting a weight scalar to a fee value, based on the scale and granularity of the + /// node's balance type. + /// + /// This should typically create a mapping between the following ranges: + /// - [0, MAXIMUM_BLOCK_WEIGHT] + /// - [Balance::min, Balance::max] + /// + /// Yet, it can be used for any other sort of change to weight-fee. Some examples being: + /// - Setting it to `0` will essentially disable the weight fee. + /// - Setting it to `1` will cause the literal `#[weight = x]` values to be charged. + pub struct WeightToFee; + impl frame_support::weights::WeightToFee for WeightToFee { + type Balance = Balance; + + fn weight_to_fee(weight: &Weight) -> Self::Balance { + let time_poly: FeePolynomial = RefTimeToFee::polynomial().into(); + let proof_poly: FeePolynomial = ProofSizeToFee::polynomial().into(); + + // Take the maximum instead of the sum to charge by the more scarce resource. + time_poly.eval(weight.ref_time()).max(proof_poly.eval(weight.proof_size())) + } + } + + /// Maps the reference time component of `Weight` to a fee. + pub struct RefTimeToFee; + impl WeightToFeePolynomial for RefTimeToFee { + type Balance = Balance; + fn polynomial() -> WeightToFeeCoefficients { + // In Westend, extrinsic base weight (smallest non-zero weight) is mapped to 1/10 CENT: + // The standard system parachain configuration is 1/10 of that, as in 1/100 CENT. + let p = super::currency::CENTS; + let q = 100 * Balance::from(ExtrinsicBaseWeight::get().ref_time()); + + smallvec![WeightToFeeCoefficient { + degree: 1, + negative: false, + coeff_frac: Perbill::from_rational(p % q, q), + coeff_integer: p / q, + }] + } + } + + /// Maps the proof size component of `Weight` to a fee. + pub struct ProofSizeToFee; + impl WeightToFeePolynomial for ProofSizeToFee { + type Balance = Balance; + fn polynomial() -> WeightToFeeCoefficients { + // Map 10kb proof to 1 CENT. + let p = super::currency::CENTS; + let q = 10_000; + + smallvec![WeightToFeeCoefficient { + degree: 1, + negative: false, + coeff_frac: Perbill::from_rational(p % q, q), + coeff_integer: p / q, + }] + } + } +} + +/// Consensus-related. +pub mod consensus { + /// Maximum number of blocks simultaneously accepted by the Runtime, not yet included into the + /// relay chain. + pub const UNINCLUDED_SEGMENT_CAPACITY: u32 = 1; + /// How many parachain blocks are processed by the relay chain per parent. Limits the number of + /// blocks authored per slot. 
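+	/// With a capacity and velocity of 1 this is the plain synchronous-backing configuration:
+	/// at most one new, not-yet-included parachain block at a time.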
+ pub const BLOCK_PROCESSING_VELOCITY: u32 = 1; + /// Relay chain slot duration, in milliseconds. + pub const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; +} diff --git a/cumulus/parachains/testnets-common/src/wococo.rs b/cumulus/parachains/testnets-common/src/wococo.rs new file mode 100644 index 00000000000..5cd6121135a --- /dev/null +++ b/cumulus/parachains/testnets-common/src/wococo.rs @@ -0,0 +1,17 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// re-export rococo +pub use crate::rococo::{consensus, currency, fee}; diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml index f7252a39a38..d5deda9e7bf 100644 --- a/cumulus/polkadot-parachain/Cargo.toml +++ b/cumulus/polkadot-parachain/Cargo.toml @@ -24,6 +24,7 @@ serde_json = "1.0.108" # Local rococo-parachain-runtime = { path = "../parachains/runtimes/testing/rococo-parachain" } shell-runtime = { path = "../parachains/runtimes/starters/shell" } +glutton-westend-runtime = { path = "../parachains/runtimes/glutton/glutton-westend" } glutton-runtime = { path = "../parachains/runtimes/glutton/glutton-kusama" } seedling-runtime = { path = "../parachains/runtimes/starters/seedling" } asset-hub-polkadot-runtime = { path = "../parachains/runtimes/assets/asset-hub-polkadot" } @@ -31,6 +32,7 @@ asset-hub-kusama-runtime = { path = "../parachains/runtimes/assets/asset-hub-kus asset-hub-rococo-runtime = { path = "../parachains/runtimes/assets/asset-hub-rococo" } asset-hub-westend-runtime = { path = "../parachains/runtimes/assets/asset-hub-westend" } collectives-polkadot-runtime = { path = "../parachains/runtimes/collectives/collectives-polkadot" } +collectives-westend-runtime = { path = "../parachains/runtimes/collectives/collectives-westend" } contracts-rococo-runtime = { path = "../parachains/runtimes/contracts/contracts-rococo" } bridge-hub-rococo-runtime = { path = "../parachains/runtimes/bridge-hubs/bridge-hub-rococo" } bridge-hub-kusama-runtime = { path = "../parachains/runtimes/bridge-hubs/bridge-hub-kusama" } @@ -119,11 +121,13 @@ runtime-benchmarks = [ "bridge-hub-rococo-runtime/runtime-benchmarks", "bridge-hub-westend-runtime/runtime-benchmarks", "collectives-polkadot-runtime/runtime-benchmarks", + "collectives-westend-runtime/runtime-benchmarks", "contracts-rococo-runtime/runtime-benchmarks", "cumulus-primitives-core/runtime-benchmarks", "frame-benchmarking-cli/runtime-benchmarks", "frame-benchmarking/runtime-benchmarks", "glutton-runtime/runtime-benchmarks", + "glutton-westend-runtime/runtime-benchmarks", "parachains-common/runtime-benchmarks", "penpal-runtime/runtime-benchmarks", "polkadot-cli/runtime-benchmarks", @@ -143,8 +147,10 @@ try-runtime = [ "bridge-hub-rococo-runtime/try-runtime", "bridge-hub-westend-runtime/try-runtime", "collectives-polkadot-runtime/try-runtime", + "collectives-westend-runtime/try-runtime", "contracts-rococo-runtime/try-runtime", "glutton-runtime/try-runtime", + 
"glutton-westend-runtime/try-runtime", "penpal-runtime/try-runtime", "polkadot-cli/try-runtime", "polkadot-service/try-runtime", diff --git a/cumulus/polkadot-parachain/src/chain_spec/collectives.rs b/cumulus/polkadot-parachain/src/chain_spec/collectives.rs index 0a8064f50ca..07bd742fa8e 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/collectives.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/collectives.rs @@ -23,9 +23,12 @@ use sc_service::ChainType; use sp_core::sr25519; pub type CollectivesPolkadotChainSpec = sc_service::GenericChainSpec<(), Extensions>; +pub type CollectivesWestendChainSpec = sc_service::GenericChainSpec<(), Extensions>; const COLLECTIVES_POLKADOT_ED: CollectivesBalance = parachains_common::polkadot::currency::EXISTENTIAL_DEPOSIT; +const COLLECTIVES_WESTEND_ED: CollectivesBalance = + parachains_common::westend::currency::EXISTENTIAL_DEPOSIT; /// Generate the session keys from individual elements. /// @@ -158,3 +161,133 @@ fn collectives_polkadot_genesis( }, }) } + +/// Generate the session keys from individual elements. +/// +/// The input must be a tuple of individual keys (a single arg for now since we have just one key). +pub fn collectives_westend_session_keys(keys: AuraId) -> collectives_westend_runtime::SessionKeys { + collectives_westend_runtime::SessionKeys { aura: keys } +} + +pub fn collectives_westend_development_config() -> CollectivesWestendChainSpec { + let mut properties = sc_chain_spec::Properties::new(); + properties.insert("ss58Format".into(), 42.into()); + properties.insert("tokenSymbol".into(), "WND".into()); + properties.insert("tokenDecimals".into(), 12.into()); + + CollectivesWestendChainSpec::builder( + collectives_westend_runtime::WASM_BINARY + .expect("WASM binary was not built, please build it!"), + Extensions { relay_chain: "westend-dev".into(), para_id: 1002 }, + ) + .with_name("Westend Collectives Development") + .with_id("collectives_westend_dev") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(collectives_westend_genesis( + // initial collators. + vec![( + get_account_id_from_seed::("Alice"), + get_collator_keys_from_seed::("Alice"), + )], + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + ], + // 1002 avoids a potential collision with Kusama-1001 (Encointer) should there ever + // be a collective para on Kusama. + 1002.into(), + )) + .with_boot_nodes(Vec::new()) + .with_properties(properties) + .build() +} + +/// Collectives Westend Local Config. +pub fn collectives_westend_local_config() -> CollectivesWestendChainSpec { + let mut properties = sc_chain_spec::Properties::new(); + properties.insert("ss58Format".into(), 42.into()); + properties.insert("tokenSymbol".into(), "WND".into()); + properties.insert("tokenDecimals".into(), 12.into()); + + CollectivesWestendChainSpec::builder( + collectives_westend_runtime::WASM_BINARY + .expect("WASM binary was not built, please build it!"), + Extensions { relay_chain: "westend-local".into(), para_id: 1002 }, + ) + .with_name("Westend Collectives Local") + .with_id("collectives_westend_local") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(collectives_westend_genesis( + // initial collators. 
+ vec![ + ( + get_account_id_from_seed::("Alice"), + get_collator_keys_from_seed::("Alice"), + ), + ( + get_account_id_from_seed::("Bob"), + get_collator_keys_from_seed::("Bob"), + ), + ], + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Charlie"), + get_account_id_from_seed::("Dave"), + get_account_id_from_seed::("Eve"), + get_account_id_from_seed::("Ferdie"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + get_account_id_from_seed::("Charlie//stash"), + get_account_id_from_seed::("Dave//stash"), + get_account_id_from_seed::("Eve//stash"), + get_account_id_from_seed::("Ferdie//stash"), + ], + 1002.into(), + )) + .with_boot_nodes(Vec::new()) + .with_properties(properties) + .build() +} + +fn collectives_westend_genesis( + invulnerables: Vec<(AccountId, AuraId)>, + endowed_accounts: Vec, + id: ParaId, +) -> serde_json::Value { + serde_json::json!( { + "balances": { + "balances": endowed_accounts + .iter() + .cloned() + .map(|k| (k, COLLECTIVES_WESTEND_ED * 4096)) + .collect::>(), + }, + "parachainInfo": { + "parachainId": id, + }, + "collatorSelection": { + "invulnerables": invulnerables.iter().cloned().map(|(acc, _)| acc).collect::>(), + "candidacyBond": COLLECTIVES_WESTEND_ED * 16, + }, + "session": { + "keys": invulnerables + .into_iter() + .map(|(acc, aura)| { + ( + acc.clone(), // account id + acc, // validator id + collectives_westend_session_keys(aura), // session keys + ) + }) + .collect::>(), + }, + // no need to pass anything to aura, in fact it will panic if we do. Session will take care + // of this. + "polkadotXcm": { + "safeXcmVersion": Some(SAFE_XCM_VERSION), + }, + }) +} diff --git a/cumulus/polkadot-parachain/src/chain_spec/glutton.rs b/cumulus/polkadot-parachain/src/chain_spec/glutton.rs index 1a0a06404c5..aff1358d1ae 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/glutton.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/glutton.rs @@ -24,6 +24,7 @@ use super::get_collator_keys_from_seed; /// Specialized `ChainSpec` for the Glutton parachain runtime. 
pub type GluttonChainSpec = sc_service::GenericChainSpec<(), Extensions>; +pub type GluttonWestendChainSpec = sc_service::GenericChainSpec<(), Extensions>; pub fn glutton_development_config(para_id: ParaId) -> GluttonChainSpec { GluttonChainSpec::builder( @@ -92,3 +93,71 @@ fn glutton_genesis(parachain_id: ParaId, collators: Vec) -> serde_json:: "aura": { "authorities": collators }, }) } + +pub fn glutton_westend_development_config(para_id: ParaId) -> GluttonWestendChainSpec { + GluttonWestendChainSpec::builder( + glutton_westend_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), + Extensions { relay_chain: "westend-dev".into(), para_id: para_id.into() }, + ) + .with_name("Glutton Development") + .with_id("glutton_westend_dev") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(glutton_genesis( + para_id, + vec![get_collator_keys_from_seed::("Alice")], + )) + .build() +} + +pub fn glutton_westend_local_config(para_id: ParaId) -> GluttonWestendChainSpec { + GluttonWestendChainSpec::builder( + glutton_westend_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), + Extensions { relay_chain: "westend-local".into(), para_id: para_id.into() }, + ) + .with_name("Glutton Local") + .with_id("glutton_westend_local") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(glutton_genesis( + para_id, + vec![ + get_collator_keys_from_seed::("Alice"), + get_collator_keys_from_seed::("Bob"), + ], + )) + .build() +} + +pub fn glutton_westend_config(para_id: ParaId) -> GluttonWestendChainSpec { + let mut properties = sc_chain_spec::Properties::new(); + properties.insert("ss58Format".into(), 42.into()); + + GluttonChainSpec::builder( + glutton_westend_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), + Extensions { relay_chain: "westend".into(), para_id: para_id.into() }, + ) + .with_name(format!("Glutton {}", para_id).as_str()) + .with_id(format!("glutton-westend-{}", para_id).as_str()) + .with_chain_type(ChainType::Live) + .with_genesis_config_patch(glutton_westend_genesis( + para_id, + vec![ + get_collator_keys_from_seed::("Alice"), + get_collator_keys_from_seed::("Bob"), + ], + )) + .with_protocol_id(format!("glutton-westend-{}", para_id).as_str()) + .with_properties(properties) + .build() +} + +fn glutton_westend_genesis(parachain_id: ParaId, collators: Vec) -> serde_json::Value { + serde_json::json!( { + "parachainInfo": { + "parachainId": parachain_id + }, + "sudo": { + "key": Some(get_account_id_from_seed::("Alice")), + }, + "aura": { "authorities": collators }, + }) +} diff --git a/cumulus/polkadot-parachain/src/command.rs b/cumulus/polkadot-parachain/src/command.rs index 3f93c90558a..2799175b8ee 100644 --- a/cumulus/polkadot-parachain/src/command.rs +++ b/cumulus/polkadot-parachain/src/command.rs @@ -50,6 +50,7 @@ enum Runtime { CollectivesPolkadot, CollectivesWestend, Glutton, + GluttonWestend, BridgeHub(chain_spec::bridge_hubs::BridgeHubRuntimeType), } @@ -111,6 +112,8 @@ fn runtime(id: &str) -> Runtime { id.parse::() .expect("Invalid value"), ) + } else if id.starts_with("glutton-westend") { + Runtime::GluttonWestend } else if id.starts_with("glutton") { Runtime::Glutton } else { @@ -219,8 +222,12 @@ fn load_spec(id: &str) -> std::result::Result, String> { Box::new(chain_spec::collectives::CollectivesPolkadotChainSpec::from_json_bytes( &include_bytes!("../chain-specs/collectives-polkadot.json")[..], )?), + "collectives-westend-dev" => + 
Box::new(chain_spec::collectives::collectives_westend_development_config()), + "collectives-westend-local" => + Box::new(chain_spec::collectives::collectives_westend_local_config()), "collectives-westend" => - Box::new(chain_spec::collectives::CollectivesPolkadotChainSpec::from_json_bytes( + Box::new(chain_spec::collectives::CollectivesWestendChainSpec::from_json_bytes( &include_bytes!("../chain-specs/collectives-westend.json")[..], )?), @@ -254,6 +261,18 @@ fn load_spec(id: &str) -> std::result::Result, String> { "polkadot-local", )), + // -- Glutton Westend + "glutton-westend-dev" => Box::new(chain_spec::glutton::glutton_westend_development_config( + para_id.expect("Must specify parachain id"), + )), + "glutton-westend-local" => Box::new(chain_spec::glutton::glutton_westend_local_config( + para_id.expect("Must specify parachain id"), + )), + // the chain spec as used for generating the upgrade genesis values + "glutton-westend-genesis" => Box::new(chain_spec::glutton::glutton_westend_config( + para_id.expect("Must specify parachain id"), + )), + // -- Glutton "glutton-kusama-dev" => Box::new(chain_spec::glutton::glutton_development_config( para_id.expect("Must specify parachain id"), @@ -288,9 +307,12 @@ fn load_spec(id: &str) -> std::result::Result, String> { Runtime::AssetHubWestend => Box::new( chain_spec::asset_hubs::AssetHubWestendChainSpec::from_json_file(path)?, ), - Runtime::CollectivesPolkadot | Runtime::CollectivesWestend => Box::new( + Runtime::CollectivesPolkadot => Box::new( chain_spec::collectives::CollectivesPolkadotChainSpec::from_json_file(path)?, ), + Runtime::CollectivesWestend => Box::new( + chain_spec::collectives::CollectivesWestendChainSpec::from_json_file(path)?, + ), Runtime::Shell => Box::new(chain_spec::shell::ShellChainSpec::from_json_file(path)?), Runtime::Seedling => @@ -301,6 +323,8 @@ fn load_spec(id: &str) -> std::result::Result, String> { bridge_hub_runtime_type.chain_spec_from_json_file(path)?, Runtime::Penpal(_para_id) => Box::new(chain_spec::penpal::PenpalChainSpec::from_json_file(path)?), + Runtime::GluttonWestend => + Box::new(chain_spec::glutton::GluttonChainSpec::from_json_file(path)?), Runtime::Glutton => Box::new(chain_spec::glutton::GluttonChainSpec::from_json_file(path)?), Runtime::Default => Box::new( @@ -322,6 +346,10 @@ fn extract_parachain_id(id: &str) -> (&str, &str, Option) { const GLUTTON_PARA_LOCAL_PREFIX: &str = "glutton-kusama-local-"; const GLUTTON_PARA_GENESIS_PREFIX: &str = "glutton-kusama-genesis-"; + const GLUTTON_WESTEND_PARA_DEV_PREFIX: &str = "glutton-westend-dev-"; + const GLUTTON_WESTEND_PARA_LOCAL_PREFIX: &str = "glutton-westend-local-"; + const GLUTTON_WESTEND_PARA_GENESIS_PREFIX: &str = "glutton-westend-genesis-"; + let (norm_id, orig_id, para) = if let Some(suffix) = id.strip_prefix(KUSAMA_TEST_PARA_PREFIX) { let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); (&id[..KUSAMA_TEST_PARA_PREFIX.len() - 1], id, Some(para_id)) @@ -337,6 +365,15 @@ fn extract_parachain_id(id: &str) -> (&str, &str, Option) { } else if let Some(suffix) = id.strip_prefix(GLUTTON_PARA_GENESIS_PREFIX) { let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); (&id[..GLUTTON_PARA_GENESIS_PREFIX.len() - 1], id, Some(para_id)) + } else if let Some(suffix) = id.strip_prefix(GLUTTON_WESTEND_PARA_DEV_PREFIX) { + let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); + (&id[..GLUTTON_WESTEND_PARA_DEV_PREFIX.len() - 1], id, Some(para_id)) + } else if let Some(suffix) = 
id.strip_prefix(GLUTTON_WESTEND_PARA_LOCAL_PREFIX) { + let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); + (&id[..GLUTTON_WESTEND_PARA_LOCAL_PREFIX.len() - 1], id, Some(para_id)) + } else if let Some(suffix) = id.strip_prefix(GLUTTON_WESTEND_PARA_GENESIS_PREFIX) { + let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); + (&id[..GLUTTON_WESTEND_PARA_GENESIS_PREFIX.len() - 1], id, Some(para_id)) } else { (id, id, None) }; @@ -494,13 +531,20 @@ macro_rules! construct_partials { $code }, }, - Runtime::CollectivesPolkadot | Runtime::CollectivesWestend => { + Runtime::CollectivesPolkadot => { let $partials = new_partial::( &$config, crate::service::aura_build_import_queue::<_, AuraId>, )?; $code }, + Runtime::CollectivesWestend => { + let $partials = new_partial::( + &$config, + crate::service::aura_build_import_queue::<_, AuraId>, + )?; + $code + }, Runtime::Shell => { let $partials = new_partial::( &$config, @@ -529,6 +573,13 @@ macro_rules! construct_partials { )?; $code }, + Runtime::GluttonWestend => { + let $partials = new_partial::( + &$config, + crate::service::shell_build_import_queue, + )?; + $code + }, Runtime::Glutton => { let $partials = new_partial::( &$config, @@ -584,7 +635,7 @@ macro_rules! construct_async_run { { $( $code )* }.map(|v| (v, task_manager)) }) }, - Runtime::CollectivesPolkadot | Runtime::CollectivesWestend => { + Runtime::CollectivesPolkadot => { runner.async_run(|$config| { let $components = new_partial::( &$config, @@ -594,6 +645,16 @@ macro_rules! construct_async_run { { $( $code )* }.map(|v| (v, task_manager)) }) }, + Runtime::CollectivesWestend => { + runner.async_run(|$config| { + let $components = new_partial::( + &$config, + crate::service::aura_build_import_queue::<_, AuraId>, + )?; + let task_manager = $components.task_manager; + { $( $code )* }.map(|v| (v, task_manager)) + }) + }, Runtime::Shell => { runner.async_run(|$config| { let $components = new_partial::( @@ -705,6 +766,16 @@ macro_rules! construct_async_run { { $( $code )* }.map(|v| (v, task_manager)) }) }, + Runtime::GluttonWestend => { + runner.async_run(|$config| { + let $components = new_partial::( + &$config, + crate::service::shell_build_import_queue, + )?; + let task_manager = $components.task_manager; + { $( $code )* }.map(|v| (v, task_manager)) + }) + }, Runtime::Glutton => { runner.async_run(|$config| { let $components = new_partial::( @@ -836,7 +907,7 @@ pub fn run() -> Result<()> { // that both file paths exist, the node will exit, as the user must decide (by // deleting one path) the information that they want to use as their DB. 
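                // For example, a node originally synced under the old `statemint` chain id keeps its
                // database in that directory, while the renamed `asset-hub-polkadot` id points to a new
                // path; if both directories are present the node cannot pick one automatically and the
                // user must delete the one they no longer want.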
let old_name = match config.chain_spec.id() { - "asset-hub-polkadot" => Some("statemint"), + "asset-hub-polkadot" => Some("statemint"), "asset-hub-kusama" => Some("statemine"), "asset-hub-westend" => Some("westmint"), "asset-hub-rococo" => Some("rockmine"), @@ -921,7 +992,7 @@ pub fn run() -> Result<()> { .await .map(|r| r.0) .map_err(Into::into), - Runtime::CollectivesPolkadot | Runtime::CollectivesWestend => + Runtime::CollectivesPolkadot => crate::service::start_generic_aura_node::< collectives_polkadot_runtime::RuntimeApi, AuraId, @@ -929,6 +1000,14 @@ pub fn run() -> Result<()> { .await .map(|r| r.0) .map_err(Into::into), + Runtime::CollectivesWestend => + crate::service::start_generic_aura_node::< + collectives_westend_runtime::RuntimeApi, + AuraId, + >(config, polkadot_config, collator_options, id, hwbench) + .await + .map(|r| r.0) + .map_err(Into::into), Runtime::Shell => crate::service::start_shell_node::( config, @@ -962,7 +1041,7 @@ pub fn run() -> Result<()> { .map(|r| r.0) .map_err(Into::into), Runtime::BridgeHub(bridge_hub_runtime_type) => match bridge_hub_runtime_type { - chain_spec::bridge_hubs::BridgeHubRuntimeType::Polkadot | +chain_spec::bridge_hubs::BridgeHubRuntimeType::Polkadot | chain_spec::bridge_hubs::BridgeHubRuntimeType::PolkadotLocal | chain_spec::bridge_hubs::BridgeHubRuntimeType::PolkadotDevelopment => crate::service::start_generic_aura_node::< @@ -1019,6 +1098,14 @@ pub fn run() -> Result<()> { .await .map(|r| r.0) .map_err(Into::into), + Runtime::GluttonWestend => + crate::service::start_basic_lookahead_node::< + glutton_westend_runtime::RuntimeApi, + AuraId, + >(config, polkadot_config, collator_options, id, hwbench) + .await + .map(|r| r.0) + .map_err(Into::into), Runtime::Glutton => crate::service::start_basic_lookahead_node::< glutton_runtime::RuntimeApi, diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs index 3bcc9b7f60d..3884fce246c 100644 --- a/cumulus/polkadot-parachain/src/service.rs +++ b/cumulus/polkadot-parachain/src/service.rs @@ -156,6 +156,21 @@ impl sc_executor::NativeExecutionDispatch for CollectivesPolkadotRuntimeExecutor } } +/// Native Westend Collectives executor instance. +pub struct CollectivesWestendRuntimeExecutor; + +impl sc_executor::NativeExecutionDispatch for CollectivesWestendRuntimeExecutor { + type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; + + fn dispatch(method: &str, data: &[u8]) -> Option> { + collectives_westend_runtime::api::dispatch(method, data) + } + + fn native_version() -> sc_executor::NativeVersion { + collectives_westend_runtime::native_version() + } +} + /// Native BridgeHubPolkadot executor instance. pub struct BridgeHubPolkadotRuntimeExecutor; @@ -216,6 +231,21 @@ impl sc_executor::NativeExecutionDispatch for ContractsRococoRuntimeExecutor { } } +/// Native Westend Glutton executor instance. +pub struct GluttonWestendRuntimeExecutor; + +impl sc_executor::NativeExecutionDispatch for GluttonWestendRuntimeExecutor { + type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; + + fn dispatch(method: &str, data: &[u8]) -> Option> { + glutton_westend_runtime::api::dispatch(method, data) + } + + fn native_version() -> sc_executor::NativeVersion { + glutton_westend_runtime::native_version() + } +} + /// Native Glutton executor instance. 
pub struct GluttonRuntimeExecutor; diff --git a/cumulus/scripts/benchmarks.sh b/cumulus/scripts/benchmarks.sh index 29d06905925..7da18d9440e 100755 --- a/cumulus/scripts/benchmarks.sh +++ b/cumulus/scripts/benchmarks.sh @@ -7,6 +7,7 @@ repeat=${3:-20} __dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" ${__dir}/benchmarks-ci.sh collectives collectives-polkadot target/$target $steps $repeat +${__dir}/benchmarks-ci.sh collectives collectives-westend target/$target $steps $repeat ${__dir}/benchmarks-ci.sh assets asset-hub-kusama target/$target $steps $repeat ${__dir}/benchmarks-ci.sh assets asset-hub-polkadot target/$target $steps $repeat @@ -17,3 +18,4 @@ ${__dir}/benchmarks-ci.sh bridge-hubs bridge-hub-kusama target/$target $steps $r ${__dir}/benchmarks-ci.sh bridge-hubs bridge-hub-rococo target/$target $steps $repeat ${__dir}/benchmarks-ci.sh glutton glutton-kusama target/$target $steps $repeat +${__dir}/benchmarks-ci.sh glutton glutton-westend target/$target $steps $repeat -- GitLab From 824b782390b16b433df89dd3ff451ea3f0ea03a1 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Wed, 15 Nov 2023 17:36:14 +0200 Subject: [PATCH 42/74] xcm: SovereignPaidRemoteExporter: remove unused RefundSurplus instruction (#2312) Refunding surplus happens anyway on xcm_executor::post_process(), automatically refunding surplus to original_origin at the end of execution. Since SovereignPaidRemoteExporter doesn't ClearOrigin, it can simply rely on the automatic mechanism. Furthermore, RefundSurplus instruction refunds _surplus_. Surplus exists only as a result of Transact, SetErrorHandler or SetAppendix instructions, none of which being part of the SovereignPaidRemoteExporter XCM program. So surplus is always zero here anyway. --- .../bridge-hubs/test-utils/src/test_cases.rs | 1 - .../src/tests/bridging/local_para_para.rs | 6 +++--- .../src/tests/bridging/local_relay_relay.rs | 4 ++-- .../tests/bridging/paid_remote_relay_relay.rs | 21 +++++++++---------- .../xcm/xcm-builder/src/universal_exports.rs | 1 - 5 files changed, 15 insertions(+), 18 deletions(-) diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases.rs index b421eea6bcf..7a86d85c86f 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases.rs @@ -909,7 +909,6 @@ where ]), ]), }, - RefundSurplus, DepositAsset { assets: Wild(All), beneficiary: MultiLocation { parents: 1, interior: X1(Parachain(1000)) }, diff --git a/polkadot/xcm/xcm-builder/src/tests/bridging/local_para_para.rs b/polkadot/xcm/xcm-builder/src/tests/bridging/local_para_para.rs index 406843a0fe8..de08dbee953 100644 --- a/polkadot/xcm/xcm-builder/src/tests/bridging/local_para_para.rs +++ b/polkadot/xcm/xcm-builder/src/tests/bridging/local_para_para.rs @@ -44,7 +44,7 @@ fn sending_to_bridged_chain_works() { maybe_with_topic(|| { let msg = Xcm(vec![Trap(1)]); let dest = (Parent, Parent, Remote::get(), Parachain(1)).into(); - assert_eq!(send_xcm::(dest, msg).unwrap().1, (Here, 100).into()); + assert_eq!(send_xcm::(dest, msg).unwrap().1, Price::get()); assert_eq!(TheBridge::service(), 1); assert_eq!( take_received_remote_messages(), @@ -78,7 +78,7 @@ fn sending_to_parachain_of_bridged_chain_works() { maybe_with_topic(|| { let msg = Xcm(vec![Trap(1)]); let dest = (Parent, Parent, Remote::get(), Parachain(1000)).into(); - assert_eq!(send_xcm::(dest, msg).unwrap().1, (Here, 100).into()); + 
assert_eq!(send_xcm::(dest, msg).unwrap().1, Price::get()); assert_eq!(TheBridge::service(), 1); let expected = vec![( (Parent, Parachain(1000)).into(), @@ -110,7 +110,7 @@ fn sending_to_relay_chain_of_bridged_chain_works() { maybe_with_topic(|| { let msg = Xcm(vec![Trap(1)]); let dest = (Parent, Parent, Remote::get()).into(); - assert_eq!(send_xcm::(dest, msg).unwrap().1, (Here, 100).into()); + assert_eq!(send_xcm::(dest, msg).unwrap().1, Price::get()); assert_eq!(TheBridge::service(), 1); let expected = vec![( Parent.into(), diff --git a/polkadot/xcm/xcm-builder/src/tests/bridging/local_relay_relay.rs b/polkadot/xcm/xcm-builder/src/tests/bridging/local_relay_relay.rs index 02c454bb212..8433b6e0212 100644 --- a/polkadot/xcm/xcm-builder/src/tests/bridging/local_relay_relay.rs +++ b/polkadot/xcm/xcm-builder/src/tests/bridging/local_relay_relay.rs @@ -41,7 +41,7 @@ fn sending_to_bridged_chain_works() { let msg = Xcm(vec![Trap(1)]); assert_eq!( send_xcm::((Parent, Remote::get()).into(), msg).unwrap().1, - (Here, 100).into() + Price::get() ); assert_eq!(TheBridge::service(), 1); let expected = vec![( @@ -68,7 +68,7 @@ fn sending_to_parachain_of_bridged_chain_works() { maybe_with_topic(|| { let msg = Xcm(vec![Trap(1)]); let dest = (Parent, Remote::get(), Parachain(1000)).into(); - assert_eq!(send_xcm::(dest, msg).unwrap().1, (Here, 100).into()); + assert_eq!(send_xcm::(dest, msg).unwrap().1, Price::get()); assert_eq!(TheBridge::service(), 1); let expected = vec![( Parachain(1000).into(), diff --git a/polkadot/xcm/xcm-builder/src/tests/bridging/paid_remote_relay_relay.rs b/polkadot/xcm/xcm-builder/src/tests/bridging/paid_remote_relay_relay.rs index 45dc2d4a3b9..23d6eb99a90 100644 --- a/polkadot/xcm/xcm-builder/src/tests/bridging/paid_remote_relay_relay.rs +++ b/polkadot/xcm/xcm-builder/src/tests/bridging/paid_remote_relay_relay.rs @@ -24,6 +24,9 @@ use super::*; parameter_types! { + // 100 to use the bridge (export) and 80 for the remote execution weight (4 instructions x (10 + + // 10) weight each). + pub SendOverBridgePrice: u128 = 180u128 + if UsingTopic::get() { 20 } else { 0 }; pub UniversalLocation: Junctions = X2(GlobalConsensus(Local::get()), Parachain(100)); pub RelayUniversalLocation: Junctions = X1(GlobalConsensus(Local::get())); pub RemoteUniversalLocation: Junctions = X1(GlobalConsensus(Remote::get())); @@ -32,11 +35,9 @@ parameter_types! { Remote::get(), None, MultiLocation::parent(), - Some((Parent, 200u128 + if UsingTopic::get() { 20 } else { 0 }).into()) + Some((Parent, SendOverBridgePrice::get()).into()) ) ]; - // ^^^ 100 to use the bridge (export) and 100 for the remote execution weight (5 instructions - // x (10 + 10) weight each). } type TheBridge = TestBridge>; @@ -68,7 +69,7 @@ fn sending_to_bridged_chain_works() { clear_assets(Parachain(100)); add_asset(Parachain(100), (Here, 1000u128)); - let price = 200u128 + if UsingTopic::get() { 20 } else { 0 }; + let price = SendOverBridgePrice::get(); let msg = Xcm(vec![Trap(1)]); assert_eq!(send_xcm::(dest, msg).unwrap().1, (Parent, price).into()); @@ -86,7 +87,7 @@ fn sending_to_bridged_chain_works() { )]; assert_eq!(take_received_remote_messages(), expected); - // The export cost 50 ref time and 50 proof size weight units (and thus 100 units of + // The export cost 40 ref time and 40 proof size weight units (and thus 80 units of // balance). 
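		// With the `SendOverBridgePrice` parameter defined above, `price` works out to
		// 100 for the bridge export plus 4 instructions * (10 ref_time + 10 proof_size) = 180,
		// plus 20 when `UsingTopic` is set, so the sender parachain keeps `1000 - price`
		// of the 1000 units it was endowed with.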
assert_eq!(asset_list(Parachain(100)), vec![(Here, 1000u128 - price).into()]); @@ -104,11 +105,10 @@ fn sending_to_bridged_chain_works() { destination: Here, xcm: xcm_with_topic([0; 32], vec![Trap(1)]), }, - RefundSurplus, DepositAsset { assets: Wild(All), beneficiary: Parachain(100).into() }, ], ), - outcome: Outcome::Complete(test_weight(5)), + outcome: Outcome::Complete(test_weight(4)), paid: true, }; assert_eq!(RoutingLog::take(), vec![entry]); @@ -143,7 +143,7 @@ fn sending_to_parachain_of_bridged_chain_works() { clear_assets(Parachain(100)); add_asset(Parachain(100), (Here, 1000u128)); - let price = 200u128 + if UsingTopic::get() { 20 } else { 0 }; + let price = SendOverBridgePrice::get(); let msg = Xcm(vec![Trap(1)]); assert_eq!(send_xcm::(dest, msg).unwrap().1, (Parent, price).into()); @@ -161,7 +161,7 @@ fn sending_to_parachain_of_bridged_chain_works() { )]; assert_eq!(take_received_remote_messages(), expected); - // The export cost 50 ref time and 50 proof size weight units (and thus 100 units of + // The export cost 40 ref time and 40 proof size weight units (and thus 80 units of // balance). assert_eq!(asset_list(Parachain(100)), vec![(Here, 1000u128 - price).into()]); @@ -179,11 +179,10 @@ fn sending_to_parachain_of_bridged_chain_works() { destination: Parachain(100).into(), xcm: xcm_with_topic([0; 32], vec![Trap(1)]), }, - RefundSurplus, DepositAsset { assets: Wild(All), beneficiary: Parachain(100).into() }, ], ), - outcome: Outcome::Complete(test_weight(5)), + outcome: Outcome::Complete(test_weight(4)), paid: true, }; assert_eq!(RoutingLog::take(), vec![entry]); diff --git a/polkadot/xcm/xcm-builder/src/universal_exports.rs b/polkadot/xcm/xcm-builder/src/universal_exports.rs index dbe9571d461..8e2cf88b3c3 100644 --- a/polkadot/xcm/xcm-builder/src/universal_exports.rs +++ b/polkadot/xcm/xcm-builder/src/universal_exports.rs @@ -305,7 +305,6 @@ impl Date: Wed, 15 Nov 2023 16:40:07 +0100 Subject: [PATCH 43/74] [testnet] Remove Wococo stuff from BridgeHubRococo/AssetHubRococo (#2300) Rococo<>Wococo bridge is replaced by Rococo Co-authored-by: Svyatoslav Nikolsky --- Cargo.lock | 66 --- Cargo.toml | 5 - bridges/README.md | 175 +------- bridges/bin/runtime-common/src/lib.rs | 41 +- bridges/bin/runtime-common/src/mock.rs | 46 +- .../runtime-common/src/priority_calculator.rs | 1 + bridges/docs/high-level-overview.md | 2 +- bridges/modules/grandpa/src/mock.rs | 40 +- bridges/modules/grandpa/src/weights.rs | 62 +-- bridges/modules/messages/Cargo.toml | 2 - bridges/modules/messages/src/mock.rs | 54 +-- bridges/modules/messages/src/weights.rs | 226 +++++----- bridges/modules/parachains/src/mock.rs | 41 +- bridges/modules/parachains/src/weights.rs | 122 +++--- bridges/modules/parachains/src/weights_ext.rs | 2 +- bridges/modules/relayers/Cargo.toml | 1 - bridges/modules/relayers/src/mock.rs | 44 +- bridges/modules/relayers/src/weights.rs | 2 +- .../modules/xcm-bridge-hub-router/src/mock.rs | 31 +- .../xcm-bridge-hub-router/src/weights.rs | 2 +- .../chain-asset-hub-kusama/Cargo.toml | 26 -- .../chain-asset-hub-kusama/src/lib.rs | 49 --- .../chain-asset-hub-polkadot/Cargo.toml | 28 -- .../chain-asset-hub-polkadot/src/lib.rs | 49 --- .../chain-asset-hub-rococo/src/lib.rs | 3 - .../chain-asset-hub-wococo/Cargo.toml | 26 -- .../chain-asset-hub-wococo/src/lib.rs | 48 --- .../chain-bridge-hub-cumulus/Cargo.toml | 2 +- .../chain-bridge-hub-kusama/Cargo.toml | 2 +- .../chain-bridge-hub-polkadot/Cargo.toml | 2 +- .../chain-bridge-hub-rococo/src/lib.rs | 3 - .../chain-bridge-hub-wococo/Cargo.toml | 
34 -- .../chain-bridge-hub-wococo/src/lib.rs | 90 ---- bridges/primitives/chain-wococo/Cargo.toml | 34 -- bridges/primitives/chain-wococo/src/lib.rs | 68 --- bridges/primitives/runtime/src/chain.rs | 6 +- bridges/primitives/runtime/src/extensions.rs | 2 +- bridges/primitives/runtime/src/lib.rs | 35 -- bridges/scripts/verify-pallets-build.sh | 9 - bridges/zombienet/README.md | 4 +- .../helpers/wait-hrmp-channel-opened.js | 22 + bridges/zombienet/run-tests.sh | 4 +- bridges/zombienet/scripts/invoke-script.sh | 2 +- ...set-transfer-works-rococo-to-westend.zndsl | 26 ++ ...sset-transfer-works-rococo-to-wococo.zndsl | 25 -- ...et-transfer-works-westend-to-rococo.zndsl} | 19 +- bridges/zombienet/tests/0001-start-relay.sh | 2 +- .../bridges/bridge-hub-rococo/src/genesis.rs | 12 +- .../assets/asset-hub-rococo/Cargo.toml | 4 - .../assets/asset-hub-rococo/src/lib.rs | 120 +----- .../asset-hub-rococo/src/weights/mod.rs | 4 +- ...end.rs => pallet_xcm_bridge_hub_router.rs} | 56 ++- .../pallet_xcm_bridge_hub_router_to_rococo.rs | 130 ------ .../pallet_xcm_bridge_hub_router_to_wococo.rs | 130 ------ .../xcm/pallet_xcm_benchmarks_fungible.rs | 64 ++- .../xcm/pallet_xcm_benchmarks_generic.rs | 144 +++---- .../assets/asset-hub-rococo/src/xcm_config.rs | 213 +--------- .../assets/asset-hub-rococo/tests/tests.rs | 287 +------------ .../parachains/runtimes/bridge-hubs/README.md | 127 +----- .../bridge-hubs/bridge-hub-rococo/Cargo.toml | 6 - .../src/bridge_common_config.rs | 54 +-- .../src/bridge_to_rococo_config.rs | 317 -------------- .../src/bridge_to_westend_config.rs | 2 +- .../src/bridge_to_wococo_config.rs | 318 -------------- .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 329 +-------------- .../bridge-hub-rococo/src/weights/mod.rs | 73 +--- .../src/weights/pallet_bridge_grandpa.rs | 89 ++-- .../pallet_bridge_grandpa_rococo_finality.rs | 82 ---- .../pallet_bridge_grandpa_westend_finality.rs | 83 ---- .../pallet_bridge_grandpa_wococo_finality.rs | 82 ---- .../src/weights/pallet_bridge_messages.rs | 312 +++++++------- ...allet_bridge_messages_rococo_to_westend.rs | 245 ----------- ...pallet_bridge_messages_rococo_to_wococo.rs | 244 ----------- ...pallet_bridge_messages_wococo_to_rococo.rs | 244 ----------- .../src/weights/pallet_bridge_parachains.rs | 128 +++--- .../pallet_bridge_parachains_within_rococo.rs | 113 ----- ...pallet_bridge_parachains_within_westend.rs | 116 ------ .../pallet_bridge_parachains_within_wococo.rs | 115 ----- .../src/weights/pallet_bridge_relayers.rs | 49 +-- .../xcm/pallet_xcm_benchmarks_fungible.rs | 87 ++-- .../xcm/pallet_xcm_benchmarks_generic.rs | 209 +++++----- .../bridge-hub-rococo/src/xcm_config.rs | 54 +-- .../bridge-hub-rococo/tests/tests.rs | 393 +----------------- .../bridge-hubs/test-utils/Cargo.toml | 4 - .../src/chain_spec/asset_hubs.rs | 87 +--- .../src/chain_spec/bridge_hubs.rs | 62 --- cumulus/polkadot-parachain/src/command.rs | 52 +-- cumulus/scripts/bridges_rococo_westend.sh | 22 +- cumulus/scripts/bridges_rococo_wococo.sh | 386 ----------------- .../bridge_hub_rococo_local_network.toml | 12 +- .../bridge_hub_westend_local_network.toml | 12 +- .../bridge_hub_wococo_local_network.toml | 94 ----- 92 files changed, 962 insertions(+), 6389 deletions(-) delete mode 100644 bridges/primitives/chain-asset-hub-kusama/Cargo.toml delete mode 100644 bridges/primitives/chain-asset-hub-kusama/src/lib.rs delete mode 100644 bridges/primitives/chain-asset-hub-polkadot/Cargo.toml delete mode 100644 bridges/primitives/chain-asset-hub-polkadot/src/lib.rs delete mode 100644 
bridges/primitives/chain-asset-hub-wococo/Cargo.toml delete mode 100644 bridges/primitives/chain-asset-hub-wococo/src/lib.rs delete mode 100644 bridges/primitives/chain-bridge-hub-wococo/Cargo.toml delete mode 100644 bridges/primitives/chain-bridge-hub-wococo/src/lib.rs delete mode 100644 bridges/primitives/chain-wococo/Cargo.toml delete mode 100644 bridges/primitives/chain-wococo/src/lib.rs create mode 100644 bridges/zombienet/helpers/wait-hrmp-channel-opened.js create mode 100644 bridges/zombienet/tests/0001-asset-transfer-works-rococo-to-westend.zndsl delete mode 100644 bridges/zombienet/tests/0001-asset-transfer-works-rococo-to-wococo.zndsl rename bridges/zombienet/tests/{0001-asset-transfer-works-wococo-to-rococo.zndsl => 0001-asset-transfer-works-westend-to-rococo.zndsl} (64%) mode change 100644 => 100755 bridges/zombienet/tests/0001-start-relay.sh rename cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/{pallet_xcm_bridge_hub_router_to_westend.rs => pallet_xcm_bridge_hub_router.rs} (75%) delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router_to_rococo.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router_to_wococo.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_rococo_config.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_wococo_config.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_rococo_finality.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_westend_finality.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_wococo_finality.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_westend.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_wococo.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_wococo_to_rococo.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_within_rococo.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_within_westend.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_within_wococo.rs delete mode 100755 cumulus/scripts/bridges_rococo_wococo.sh delete mode 100644 cumulus/zombienet/bridge-hubs/bridge_hub_wococo_local_network.toml diff --git a/Cargo.lock b/Cargo.lock index 6b52e62d742..0c374a90a18 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -921,10 +921,8 @@ dependencies = [ "assets-common", "bp-asset-hub-rococo", "bp-asset-hub-westend", - "bp-asset-hub-wococo", "bp-bridge-hub-rococo", "bp-bridge-hub-westend", - "bp-bridge-hub-wococo", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", @@ -1690,27 +1688,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "bp-asset-hub-kusama" -version = "0.1.0" -dependencies = [ - "bp-xcm-bridge-hub-router", - "frame-support", - "parity-scale-codec", - "scale-info", -] - -[[package]] -name = "bp-asset-hub-polkadot" -version = "0.1.0" -dependencies = [ - 
"bp-xcm-bridge-hub-router", - "frame-support", - "parity-scale-codec", - "scale-info", - "sp-runtime", -] - [[package]] name = "bp-asset-hub-rococo" version = "0.1.0" @@ -1731,16 +1708,6 @@ dependencies = [ "scale-info", ] -[[package]] -name = "bp-asset-hub-wococo" -version = "0.1.0" -dependencies = [ - "bp-xcm-bridge-hub-router", - "frame-support", - "parity-scale-codec", - "scale-info", -] - [[package]] name = "bp-bridge-hub-cumulus" version = "0.1.0" @@ -1807,19 +1774,6 @@ dependencies = [ "sp-std 8.0.0", ] -[[package]] -name = "bp-bridge-hub-wococo" -version = "0.1.0" -dependencies = [ - "bp-bridge-hub-cumulus", - "bp-messages", - "bp-runtime", - "frame-support", - "sp-api", - "sp-runtime", - "sp-std 8.0.0", -] - [[package]] name = "bp-header-chain" version = "0.1.0" @@ -2011,19 +1965,6 @@ dependencies = [ "sp-std 8.0.0", ] -[[package]] -name = "bp-wococo" -version = "0.1.0" -dependencies = [ - "bp-header-chain", - "bp-polkadot-core", - "bp-rococo", - "bp-runtime", - "frame-support", - "sp-api", - "sp-std 8.0.0", -] - [[package]] name = "bp-xcm-bridge-hub-router" version = "0.1.0" @@ -2205,10 +2146,8 @@ version = "0.1.0" dependencies = [ "bp-asset-hub-rococo", "bp-asset-hub-westend", - "bp-asset-hub-wococo", "bp-bridge-hub-rococo", "bp-bridge-hub-westend", - "bp-bridge-hub-wococo", "bp-header-chain", "bp-messages", "bp-parachains", @@ -2217,7 +2156,6 @@ dependencies = [ "bp-rococo", "bp-runtime", "bp-westend", - "bp-wococo", "bridge-hub-test-utils", "bridge-runtime-common", "cumulus-pallet-aura-ext", @@ -2291,8 +2229,6 @@ name = "bridge-hub-test-utils" version = "0.1.0" dependencies = [ "asset-test-utils", - "bp-bridge-hub-rococo", - "bp-bridge-hub-wococo", "bp-header-chain", "bp-messages", "bp-parachains", @@ -9661,7 +9597,6 @@ dependencies = [ "pallet-balances", "parity-scale-codec", "scale-info", - "sp-core", "sp-io", "sp-runtime", "sp-std 8.0.0", @@ -9706,7 +9641,6 @@ dependencies = [ "parity-scale-codec", "scale-info", "sp-arithmetic", - "sp-core", "sp-io", "sp-runtime", "sp-std 8.0.0", diff --git a/Cargo.toml b/Cargo.toml index 27351c09581..ed252e07053 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,23 +14,18 @@ members = [ "bridges/modules/parachains", "bridges/modules/relayers", "bridges/modules/xcm-bridge-hub-router", - "bridges/primitives/chain-asset-hub-kusama", - "bridges/primitives/chain-asset-hub-polkadot", "bridges/primitives/chain-asset-hub-rococo", "bridges/primitives/chain-asset-hub-westend", - "bridges/primitives/chain-asset-hub-wococo", "bridges/primitives/chain-bridge-hub-cumulus", "bridges/primitives/chain-bridge-hub-kusama", "bridges/primitives/chain-bridge-hub-polkadot", "bridges/primitives/chain-bridge-hub-rococo", "bridges/primitives/chain-bridge-hub-westend", - "bridges/primitives/chain-bridge-hub-wococo", "bridges/primitives/chain-kusama", "bridges/primitives/chain-polkadot", "bridges/primitives/chain-polkadot-bulletin", "bridges/primitives/chain-rococo", "bridges/primitives/chain-westend", - "bridges/primitives/chain-wococo", "bridges/primitives/header-chain", "bridges/primitives/messages", "bridges/primitives/parachains", diff --git a/bridges/README.md b/bridges/README.md index da46fe67d92..a2ce213d254 100644 --- a/bridges/README.md +++ b/bridges/README.md @@ -68,7 +68,7 @@ For example, consider the case below where we want to bridge two Substrate based ``` +---------------+ +---------------+ | | | | -| Rialto | | Millau | +| Rococo | | Westend | | | | | +-------+-------+ +-------+-------+ ^ ^ @@ -79,9 +79,9 @@ For example, consider the case below where we 
want to bridge two Substrate based +---------------+ ``` -The Millau chain must be able to accept Rialto headers and verify their integrity. It does this by using a runtime +The Rococo chain must be able to accept Westend headers and verify their integrity. It does this by using a runtime module designed to track GRANDPA finality. Since two blockchains can't interact directly they need an external service, -called a relayer, to communicate. The relayer will subscribe to new Rialto headers via RPC and submit them to the Millau +called a relayer, to communicate. The relayer will subscribe to new Rococo headers via RPC and submit them to the Westend chain for verification. Take a look at [Bridge High Level Documentation](./docs/high-level-overview.md) for more in-depth description of the @@ -94,164 +94,23 @@ Here's an overview of how the project is laid out. The main bits are the `bin`, messages between chains. ``` -├── bin // Node and Runtime for the various Substrate chains -│ └── ... -├── deployments // Useful tools for deploying test networks +├── modules // Substrate Runtime Modules (a.k.a Pallets) +│ ├── beefy // On-Chain BEEFY Light Client (in progress) +│ ├── grandpa // On-Chain GRANDPA Light Client +│ ├── messages // Cross Chain Message Passing +│ ├── parachains // On-Chain Parachains Light Client +│ ├── relayers // Relayer Rewards Registry +│ ├── xcm-bridge-hub // Multiple Dynamic Bridges Support +│ ├── xcm-bridge-hub-router // XCM Router that may be used to Connect to XCM Bridge Hub +├── primitives // Code shared between modules, runtimes, and relays │ └── ... -├── modules // Substrate Runtime Modules (a.k.a Pallets) -│ ├── beefy // On-Chain BEEFY Light Client (in progress) -│ ├── grandpa // On-Chain GRANDPA Light Client -│ ├── messages // Cross Chain Message Passing -│ ├── parachains // On-Chain Parachains Light Client -│ ├── relayers // Relayer rewards registry +├── relays // Application for sending finality proofs and messages between chains │ └── ... -├── primitives // Code shared between modules, runtimes, and relays -│ └── ... -├── relays // Application for sending finality proofs and messages between chains -│ └── ... -└── scripts // Useful development and maintenance scripts +└── scripts // Useful development and maintenance scripts ``` ## Running the Bridge -To run the Bridge you need to be able to connect the bridge relay node to the RPC interface of nodes on each side of the -bridge (source and target chain). - -There are 2 ways to run the bridge, described below: - -- building & running from source: with this option, you'll be able to run the bridge between two standalone chains that -are running GRANDPA finality gadget to achieve finality; - -- running a Docker Compose setup: this is a recommended option, where you'll see bridges with parachains, complex relays -and more. - -### Using the Source - -First you'll need to build the bridge nodes and relay. This can be done as follows: - -```bash -# In `parity-bridges-common` folder -cargo build -p rialto-bridge-node -cargo build -p millau-bridge-node -cargo build -p substrate-relay -``` - -### Running a Dev network - -We will launch a dev network to demonstrate how to relay a message between two Substrate based chains (named Rialto and -Millau). - -To do this we will need two nodes, two relayers which will relay headers, and two relayers which will relay messages. 
- -#### Running from local scripts - -To run a simple dev network you can use the scripts located in the [`deployments/local-scripts` -folder](./deployments/local-scripts). - -First, we must run the two Substrate nodes. - -```bash -# In `parity-bridges-common` folder -./deployments/local-scripts/run-rialto-node.sh -./deployments/local-scripts/run-millau-node.sh -``` - -After the nodes are up we can run the header relayers. - -```bash -./deployments/local-scripts/relay-millau-to-rialto.sh -./deployments/local-scripts/relay-rialto-to-millau.sh -``` - -At this point you should see the relayer submitting headers from the Millau Substrate chain to the Rialto Substrate -chain. - -``` -# Header Relayer Logs -[Millau_to_Rialto_Sync] [date] DEBUG bridge Going to submit finality proof of Millau header #147 to Rialto -[...] [date] INFO bridge Synced 147 of 147 headers -[...] [date] DEBUG bridge Going to submit finality proof of Millau header #148 to Rialto -[...] [date] INFO bridge Synced 148 of 149 headers -``` - -Finally, we can run the message relayers. - -```bash -./deployments/local-scripts/relay-messages-millau-to-rialto.sh -./deployments/local-scripts/relay-messages-rialto-to-millau.sh -``` - -You will also see the message lane relayers listening for new messages. - -``` -# Message Relayer Logs -[Millau_to_Rialto_MessageLane_00000000] [date] DEBUG bridge Asking Millau::ReceivingConfirmationsDelivery about best message nonces -[...] [date] INFO bridge Synced Some(2) of Some(3) nonces in Millau::MessagesDelivery -> Rialto::MessagesDelivery race -[...] [date] DEBUG bridge Asking Millau::MessagesDelivery about message nonces -[...] [date] DEBUG bridge Received best nonces from Millau::ReceivingConfirmationsDelivery: TargetClientNonces { - latest_nonce: 0, nonces_data: () } -[...] [date] DEBUG bridge Asking Millau::ReceivingConfirmationsDelivery about finalized message nonces -[...] [date] DEBUG bridge Received finalized nonces from Millau::ReceivingConfirmationsDelivery: TargetClientNonces { - latest_nonce: 0, nonces_data: () } -[...] [date] DEBUG bridge Received nonces from Millau::MessagesDelivery: SourceClientNonces { new_nonces: {}, confirmed_nonce: Some(0) } -[...] [date] DEBUG bridge Asking Millau node about its state -[...] [date] DEBUG bridge Received state from Millau node: ClientState { best_self: HeaderId(1593, 0xacac***), best_finalized_self: - HeaderId(1590, 0x0be81d...), best_finalized_peer_at_best_self: HeaderId(0, 0xdcdd89...) } -``` - -To send a message see the ["How to send a message" section](#how-to-send-a-message). - -### How to send a message - -In this section we'll show you how to quickly send a bridge message. The message is just an encoded XCM `Trap(43)` -message. - -```bash -# In `parity-bridges-common` folder -./scripts/send-message-from-millau-rialto.sh -``` - -After sending a message you will see the following logs showing a message was successfully sent: - -``` -INFO bridge Sending message to Rialto. Size: 11. -TRACE bridge Sent transaction to Millau node: 0x5e68... -``` - -And at the Rialto node logs you'll something like this: - -``` -... runtime::bridge-messages: Received messages: total=1, valid=1. Weight used: Weight(ref_time: 1215065371, proof_size: - 48559)/Weight(ref_time: 1215065371, proof_size: 54703). -``` - -It means that the message has been delivered and dispatched. Message may be dispatched with an error, though - the goal -of our test bridge is to ensure that messages are successfully delivered and all involved components are working. 
- -## Full Network Docker Compose Setup - -For a more sophisticated deployment which includes bidirectional header sync, message passing, monitoring dashboards, -etc. see the [Deployments README](./deployments/README.md). - -You should note that you can find images for all the bridge components published on [Docker -Hub](https://hub.docker.com/u/paritytech). - -To run a Rialto node for example, you can use the following command: - -```bash -docker run -p 30333:30333 -p 9933:9933 -p 9944:9944 \ - -it paritytech/rialto-bridge-node --dev --tmp \ - --rpc-cors=all --unsafe-rpc-external -``` - -## Community - -Main hangout for the community is [Element](https://element.io/) (formerly Riot). Element is a chat server like, for -example, Discord. Most discussions around Polkadot and Substrate happen in various Element "rooms" (channels). So, -joining Element might be a good idea, anyway. - -If you are interested in information exchange and development of Polkadot related bridges please feel free to join the -[Polkadot Bridges](https://app.element.io/#/room/#bridges:web3.foundation) Element channel. - -The [Substrate Technical](https://app.element.io/#/room/#substrate-technical:matrix.org) Element channel is most suited -for discussions regarding Substrate itself. +Apart from live Rococo <> Westend bridge, you may spin up local networks and test see how it works locally. More +details may be found in +[this document](https://github.com/paritytech/polkadot-sdk/tree/master//cumulus/parachains/runtimes/bridge-hubs/README.md). diff --git a/bridges/bin/runtime-common/src/lib.rs b/bridges/bin/runtime-common/src/lib.rs index ae6f40b1421..d3b3b21061d 100644 --- a/bridges/bin/runtime-common/src/lib.rs +++ b/bridges/bin/runtime-common/src/lib.rs @@ -22,7 +22,6 @@ use crate::messages_call_ext::MessagesCallSubType; use pallet_bridge_grandpa::CallSubType as GrandpaCallSubType; use pallet_bridge_parachains::CallSubType as ParachainsCallSubtype; use sp_runtime::transaction_validity::TransactionValidity; -use xcm::v3::NetworkId; pub mod messages; pub mod messages_api; @@ -92,8 +91,8 @@ where /// ```nocompile /// generate_bridge_reject_obsolete_headers_and_messages!{ /// Call, AccountId -/// BridgeRialtoGrandpa, BridgeWestendGrandpa, -/// BridgeRialtoParachains +/// BridgeRococoGrandpa, BridgeRococoMessages, +/// BridgeRococoParachains /// } /// ``` /// @@ -147,42 +146,6 @@ macro_rules! generate_bridge_reject_obsolete_headers_and_messages { }; } -/// A mapping over `NetworkId`. -/// Since `NetworkId` doesn't include `Millau`, `Rialto` and `RialtoParachain`, we create some -/// synthetic associations between these chains and `NetworkId` chains. -pub enum CustomNetworkId { - /// The Millau network ID, associated with Kusama. - Millau, - /// The Rialto network ID, associated with Polkadot. - Rialto, - /// The RialtoParachain network ID, associated with Westend. - RialtoParachain, -} - -impl TryFrom for CustomNetworkId { - type Error = (); - - fn try_from(chain: bp_runtime::ChainId) -> Result { - Ok(match chain { - bp_runtime::MILLAU_CHAIN_ID => Self::Millau, - bp_runtime::RIALTO_CHAIN_ID => Self::Rialto, - bp_runtime::RIALTO_PARACHAIN_CHAIN_ID => Self::RialtoParachain, - _ => return Err(()), - }) - } -} - -impl CustomNetworkId { - /// Converts self to XCM' network id. 
- pub const fn as_network_id(&self) -> NetworkId { - match *self { - CustomNetworkId::Millau => NetworkId::Kusama, - CustomNetworkId::Rialto => NetworkId::Polkadot, - CustomNetworkId::RialtoParachain => NetworkId::Westend, - } - } -} - #[cfg(test)] mod tests { use crate::BridgeRuntimeFilterCall; diff --git a/bridges/bin/runtime-common/src/mock.rs b/bridges/bin/runtime-common/src/mock.rs index 67ae974668e..ffabf7f6e2f 100644 --- a/bridges/bin/runtime-common/src/mock.rs +++ b/bridges/bin/runtime-common/src/mock.rs @@ -14,12 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! A mock runtime for testing different stuff in the crate. We've been using Millau -//! runtime for that before, but it has two drawbacks: -//! -//! - circular dependencies between this crate and Millau runtime; -//! -//! - we can't use (e.g. as git subtree or by copying) this crate in repo without Millau. +//! A mock runtime for testing different stuff in the crate. #![cfg(test)] @@ -44,13 +39,13 @@ use bp_runtime::{ }; use codec::{Decode, Encode}; use frame_support::{ - parameter_types, + derive_impl, parameter_types, weights::{ConstantMultiplier, IdentityFee, RuntimeDbWeight, Weight}, }; use pallet_transaction_payment::Multiplier; use sp_runtime::{ testing::H256, - traits::{BlakeTwo256, ConstU32, ConstU64, ConstU8, IdentityLookup}, + traits::{BlakeTwo256, ConstU32, ConstU64, ConstU8}, FixedPointNumber, Perquintill, }; @@ -146,30 +141,14 @@ parameter_types! { pub const ReserveId: [u8; 8] = *b"brdgrlrs"; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for TestRuntime { - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type RuntimeCall = RuntimeCall; type Hash = ThisChainHash; type Hashing = ThisChainHasher; type AccountId = ThisChainAccountId; - type Lookup = IdentityLookup; type Block = ThisChainBlock; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU32<250>; - type Version = (); - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type BlockWeights = (); - type BlockLength = (); - type DbWeight = DbWeight; - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; + type BlockHashCount = ConstU32<250>; } impl pallet_utility::Config for TestRuntime { @@ -179,21 +158,10 @@ impl pallet_utility::Config for TestRuntime { type WeightInfo = (); } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] impl pallet_balances::Config for TestRuntime { - type Balance = ThisChainBalance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - type WeightInfo = (); - type MaxLocks = ConstU32<50>; - type MaxReserves = ConstU32<50>; type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxHolds = ConstU32<0>; - type MaxFreezes = ConstU32<0>; + type AccountStore = System; } impl pallet_transaction_payment::Config for TestRuntime { diff --git a/bridges/bin/runtime-common/src/priority_calculator.rs b/bridges/bin/runtime-common/src/priority_calculator.rs index fd103448125..a597fb9e2f4 100644 --- 
a/bridges/bin/runtime-common/src/priority_calculator.rs +++ b/bridges/bin/runtime-common/src/priority_calculator.rs @@ -27,6 +27,7 @@ use frame_support::traits::Get; use sp_runtime::transaction_validity::TransactionPriority; // reexport everything from `integrity_tests` module +#[allow(unused_imports)] pub use integrity_tests::*; /// Compute priority boost for message delivery transaction that delivers diff --git a/bridges/docs/high-level-overview.md b/bridges/docs/high-level-overview.md index 42efc8100bd..d6d6fb3f099 100644 --- a/bridges/docs/high-level-overview.md +++ b/bridges/docs/high-level-overview.md @@ -1,7 +1,7 @@ # High-Level Bridge Documentation This document gives a brief, abstract description of main components that may be found in this repository. If you want -to see how we're using them to build Rococo <> Wococo (Kusama <> Polkadot) bridge, please refer to the [Polkadot <> +to see how we're using them to build Rococo <> Westend (Kusama <> Polkadot) bridge, please refer to the [Polkadot <> Kusama Bridge](./polkadot-kusama-bridge-overview.md). ## Purpose diff --git a/bridges/modules/grandpa/src/mock.rs b/bridges/modules/grandpa/src/mock.rs index f88a0a3e6a6..7efa84971fe 100644 --- a/bridges/modules/grandpa/src/mock.rs +++ b/bridges/modules/grandpa/src/mock.rs @@ -20,16 +20,9 @@ use bp_header_chain::ChainWithGrandpa; use bp_runtime::Chain; use frame_support::{ - construct_runtime, parameter_types, - traits::{ConstU32, ConstU64, Hooks}, - weights::Weight, + construct_runtime, derive_impl, parameter_types, traits::Hooks, weights::Weight, }; use sp_core::sr25519::Signature; -use sp_runtime::{ - testing::H256, - traits::{BlakeTwo256, IdentityLookup}, - Perbill, -}; pub type AccountId = u64; pub type TestHeader = sp_runtime::testing::Header; @@ -49,43 +42,14 @@ construct_runtime! { } } -parameter_types! { - pub const MaximumBlockWeight: Weight = Weight::from_parts(1024, 0); - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); -} - +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for TestRuntime { - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type RuntimeCall = RuntimeCall; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type DbWeight = (); - type BlockWeights = (); - type BlockLength = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; } parameter_types! { pub const MaxFreeMandatoryHeadersPerBlock: u32 = 2; pub const HeadersToKeep: u32 = 5; - pub const SessionLength: u64 = 5; - pub const NumValidators: u32 = 5; } impl grandpa::Config for TestRuntime { diff --git a/bridges/modules/grandpa/src/weights.rs b/bridges/modules/grandpa/src/weights.rs index 89ed70d13ac..a75e7b5a8e4 100644 --- a/bridges/modules/grandpa/src/weights.rs +++ b/bridges/modules/grandpa/src/weights.rs @@ -23,7 +23,7 @@ //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/release/millau-bridge-node +// target/release/unknown-bridge-node // benchmark // pallet // --chain=dev @@ -58,39 +58,39 @@ pub trait WeightInfo { /// Those weights are test only and must never be used in production. pub struct BridgeWeight(PhantomData); impl WeightInfo for BridgeWeight { - /// Storage: BridgeRialtoGrandpa PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownGrandpa PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa PalletOperatingMode (max_values: Some(1), max_size: Some(1), + /// Proof: BridgeUnknownGrandpa PalletOperatingMode (max_values: Some(1), max_size: Some(1), /// added: 496, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa RequestCount (r:1 w:1) + /// Storage: BridgeUnknownGrandpa RequestCount (r:1 w:1) /// - /// Proof: BridgeRialtoGrandpa RequestCount (max_values: Some(1), max_size: Some(4), added: 499, - /// mode: MaxEncodedLen) + /// Proof: BridgeUnknownGrandpa RequestCount (max_values: Some(1), max_size: Some(4), added: + /// 499, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa BestFinalized (r:1 w:1) + /// Storage: BridgeUnknownGrandpa BestFinalized (r:1 w:1) /// - /// Proof: BridgeRialtoGrandpa BestFinalized (max_values: Some(1), max_size: Some(36), added: + /// Proof: BridgeUnknownGrandpa BestFinalized (max_values: Some(1), max_size: Some(36), added: /// 531, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa CurrentAuthoritySet (r:1 w:0) + /// Storage: BridgeUnknownGrandpa CurrentAuthoritySet (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa CurrentAuthoritySet (max_values: Some(1), max_size: Some(209), + /// Proof: BridgeUnknownGrandpa CurrentAuthoritySet (max_values: Some(1), max_size: Some(209), /// added: 704, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHashesPointer (r:1 w:1) + /// Storage: BridgeUnknownGrandpa ImportedHashesPointer (r:1 w:1) /// - /// Proof: BridgeRialtoGrandpa ImportedHashesPointer (max_values: Some(1), max_size: Some(4), + /// Proof: BridgeUnknownGrandpa ImportedHashesPointer (max_values: Some(1), max_size: Some(4), /// added: 499, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHashes (r:1 w:1) + /// Storage: BridgeUnknownGrandpa ImportedHashes (r:1 w:1) /// - /// Proof: BridgeRialtoGrandpa ImportedHashes (max_values: Some(14400), max_size: Some(36), + /// Proof: BridgeUnknownGrandpa ImportedHashes (max_values: Some(14400), max_size: Some(36), /// added: 2016, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:0 w:2) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:0 w:2) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// /// The range of component `p` is `[1, 4]`. 
@@ -113,39 +113,39 @@ impl WeightInfo for BridgeWeight { // For backwards compatibility and tests impl WeightInfo for () { - /// Storage: BridgeRialtoGrandpa PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownGrandpa PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa PalletOperatingMode (max_values: Some(1), max_size: Some(1), + /// Proof: BridgeUnknownGrandpa PalletOperatingMode (max_values: Some(1), max_size: Some(1), /// added: 496, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa RequestCount (r:1 w:1) + /// Storage: BridgeUnknownGrandpa RequestCount (r:1 w:1) /// - /// Proof: BridgeRialtoGrandpa RequestCount (max_values: Some(1), max_size: Some(4), added: 499, - /// mode: MaxEncodedLen) + /// Proof: BridgeUnknownGrandpa RequestCount (max_values: Some(1), max_size: Some(4), added: + /// 499, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa BestFinalized (r:1 w:1) + /// Storage: BridgeUnknownGrandpa BestFinalized (r:1 w:1) /// - /// Proof: BridgeRialtoGrandpa BestFinalized (max_values: Some(1), max_size: Some(36), added: + /// Proof: BridgeUnknownGrandpa BestFinalized (max_values: Some(1), max_size: Some(36), added: /// 531, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa CurrentAuthoritySet (r:1 w:0) + /// Storage: BridgeUnknownGrandpa CurrentAuthoritySet (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa CurrentAuthoritySet (max_values: Some(1), max_size: Some(209), + /// Proof: BridgeUnknownGrandpa CurrentAuthoritySet (max_values: Some(1), max_size: Some(209), /// added: 704, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHashesPointer (r:1 w:1) + /// Storage: BridgeUnknownGrandpa ImportedHashesPointer (r:1 w:1) /// - /// Proof: BridgeRialtoGrandpa ImportedHashesPointer (max_values: Some(1), max_size: Some(4), + /// Proof: BridgeUnknownGrandpa ImportedHashesPointer (max_values: Some(1), max_size: Some(4), /// added: 499, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHashes (r:1 w:1) + /// Storage: BridgeUnknownGrandpa ImportedHashes (r:1 w:1) /// - /// Proof: BridgeRialtoGrandpa ImportedHashes (max_values: Some(14400), max_size: Some(36), + /// Proof: BridgeUnknownGrandpa ImportedHashes (max_values: Some(14400), max_size: Some(36), /// added: 2016, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:0 w:2) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:0 w:2) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// /// The range of component `p` is `[1, 4]`. 
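
The mock runtimes rewritten throughout this series all rely on the same idea: the `#[derive_impl(...)]` attribute pulls default associated types from a config prelude, so each `frame_system::Config` impl only spells out the chain-specific types. As a point of reference, here is a minimal sketch of that pattern under the `TestDefaultConfig` prelude used in these diffs; the `TestRuntime` and `new_test_ext` names are illustrative only (not taken from any file in this patch), and the sketch assumes the usual `sp-io`/`sp-runtime` dev-dependencies of a pallet mock.

```rust
use frame_support::{construct_runtime, derive_impl};
use sp_runtime::BuildStorage;

// A mock block type, as commonly used in pallet test runtimes.
type Block = frame_system::mocking::MockBlock<TestRuntime>;

construct_runtime! {
	pub enum TestRuntime {
		System: frame_system,
	}
}

// Every associated type omitted here is inherited from
// `frame_system::config_preludes::TestDefaultConfig`; only the
// chain-specific pieces need to be stated explicitly.
#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)]
impl frame_system::Config for TestRuntime {
	type Block = Block;
}

// Build externalities from the default genesis config for tests.
fn new_test_ext() -> sp_io::TestExternalities {
	frame_system::GenesisConfig::<TestRuntime>::default()
		.build_storage()
		.unwrap()
		.into()
}
```

Because omitted items fall back to the prelude defaults, the mocks in this series shrink from dozens of boilerplate associated types to a handful of meaningful overrides, which is exactly what the hunks above and below show.
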
diff --git a/bridges/modules/messages/Cargo.toml b/bridges/modules/messages/Cargo.toml index d3d68b33802..a5c86693309 100644 --- a/bridges/modules/messages/Cargo.toml +++ b/bridges/modules/messages/Cargo.toml @@ -22,7 +22,6 @@ bp-runtime = { path = "../../primitives/runtime", default-features = false } frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-support = { path = "../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../substrate/frame/system", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } sp-std = { path = "../../../substrate/primitives/std", default-features = false } @@ -43,7 +42,6 @@ std = [ "log/std", "num-traits/std", "scale-info/std", - "sp-core/std", "sp-runtime/std", "sp-std/std", ] diff --git a/bridges/modules/messages/src/mock.rs b/bridges/modules/messages/src/mock.rs index e98f9e1f5de..648acad772d 100644 --- a/bridges/modules/messages/src/mock.rs +++ b/bridges/modules/messages/src/mock.rs @@ -34,16 +34,11 @@ use bp_messages::{ use bp_runtime::{messages::MessageDispatchResult, Size}; use codec::{Decode, Encode}; use frame_support::{ - parameter_types, - traits::ConstU64, + derive_impl, parameter_types, weights::{constants::RocksDbWeight, Weight}, }; use scale_info::TypeInfo; -use sp_core::H256; -use sp_runtime::{ - traits::{BlakeTwo256, ConstU32, IdentityLookup}, - BuildStorage, Perbill, -}; +use sp_runtime::BuildStorage; use std::{ collections::{BTreeMap, VecDeque}, ops::RangeInclusive, @@ -84,56 +79,19 @@ frame_support::construct_runtime! { } } -parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = Weight::from_parts(1024, 0); - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); -} - pub type DbWeight = RocksDbWeight; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for TestRuntime { - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type RuntimeCall = RuntimeCall; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; - type Version = (); - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type BlockWeights = (); - type BlockLength = (); type DbWeight = DbWeight; - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] impl pallet_balances::Config for TestRuntime { - type MaxLocks = (); - type Balance = Balance; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; - type AccountStore = frame_system::Pallet; - type WeightInfo = (); - type MaxReserves = (); - type ReserveIdentifier = (); - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxHolds = ConstU32<0>; - type MaxFreezes = ConstU32<0>; + type ReserveIdentifier = [u8; 8]; + 
type AccountStore = System; } parameter_types! { diff --git a/bridges/modules/messages/src/weights.rs b/bridges/modules/messages/src/weights.rs index 5b6863984ec..5bf7d567560 100644 --- a/bridges/modules/messages/src/weights.rs +++ b/bridges/modules/messages/src/weights.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! Autogenerated weights for RialtoMessages +//! Autogenerated weights for pallet_bridge_messages //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev //! DATE: 2023-03-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -23,13 +23,13 @@ //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/release/millau-bridge-node +// target/release/unknown-bridge-node // benchmark // pallet // --chain=dev // --steps=50 // --repeat=20 -// --pallet=RialtoMessages +// --pallet=pallet_bridge_messages // --extrinsic=* // --execution=wasm // --wasm-execution=Compiled @@ -48,7 +48,7 @@ use frame_support::{ }; use sp_std::marker::PhantomData; -/// Weight functions needed for RialtoMessages. +/// Weight functions needed for pallet_bridge_messages. pub trait WeightInfo { fn receive_single_message_proof() -> Weight; fn receive_two_messages_proof() -> Weight; @@ -61,24 +61,24 @@ pub trait WeightInfo { fn receive_single_message_proof_with_dispatch(i: u32) -> Weight; } -/// Weights for `RialtoMessages` that are generated using one of the Bridge testnets. +/// Weights for `pallet_bridge_messages` that are generated using one of the Bridge testnets. /// /// Those weights are test only and must never be used in production. pub struct BridgeWeight(PhantomData); impl WeightInfo for BridgeWeight { - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof() -> Weight { // Proof Size summary in bytes: @@ -89,19 +89,19 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 
w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_two_messages_proof() -> Weight { // Proof Size summary in bytes: @@ -112,19 +112,19 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof_with_outbound_lane_state() -> Weight { // Proof Size summary in bytes: @@ -135,19 +135,19 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof_1_kb() -> Weight { // Proof Size summary in bytes: @@ -158,19 +158,19 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// 
Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof_16_kb() -> Weight { // Proof Size summary in bytes: @@ -181,19 +181,19 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages OutboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: + /// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: /// 539, mode: MaxEncodedLen) /// /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) @@ -209,19 +209,19 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages OutboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), 
added: + /// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: /// 539, mode: MaxEncodedLen) /// /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) @@ -237,19 +237,19 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages OutboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: + /// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: /// 539, mode: MaxEncodedLen) /// /// Storage: BridgeRelayers RelayerRewards (r:2 w:2) @@ -265,19 +265,19 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) /// /// The range of component `i` is `[128, 2048]`. 
@@ -296,19 +296,19 @@ impl WeightInfo for BridgeWeight { // For backwards compatibility and tests impl WeightInfo for () { - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof() -> Weight { // Proof Size summary in bytes: @@ -319,19 +319,19 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_two_messages_proof() -> Weight { // Proof Size summary in bytes: @@ -342,19 +342,19 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: 
BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof_with_outbound_lane_state() -> Weight { // Proof Size summary in bytes: @@ -365,19 +365,19 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof_1_kb() -> Weight { // Proof Size summary in bytes: @@ -388,19 +388,19 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof_16_kb() -> Weight { // Proof Size summary in bytes: @@ -411,19 +411,19 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: 
BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages OutboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: + /// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: /// 539, mode: MaxEncodedLen) /// /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) @@ -439,19 +439,19 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages OutboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: + /// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: /// 539, mode: MaxEncodedLen) /// /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) @@ -467,19 +467,19 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages OutboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: + /// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: /// 539, mode: MaxEncodedLen) /// /// Storage: BridgeRelayers RelayerRewards (r:2 w:2) @@ -495,19 +495,19 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// 
Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) /// /// The range of component `i` is `[128, 2048]`. diff --git a/bridges/modules/parachains/src/mock.rs b/bridges/modules/parachains/src/mock.rs index 14afe384171..d95e76f3108 100644 --- a/bridges/modules/parachains/src/mock.rs +++ b/bridges/modules/parachains/src/mock.rs @@ -17,17 +17,18 @@ use bp_header_chain::ChainWithGrandpa; use bp_polkadot_core::parachains::ParaId; use bp_runtime::{Chain, Parachain}; -use frame_support::{construct_runtime, parameter_types, traits::ConstU32, weights::Weight}; +use frame_support::{ + construct_runtime, derive_impl, parameter_types, traits::ConstU32, weights::Weight, +}; use sp_runtime::{ testing::H256, - traits::{BlakeTwo256, Header as HeaderT, IdentityLookup}, - MultiSignature, Perbill, + traits::{BlakeTwo256, Header as HeaderT}, + MultiSignature, }; use crate as pallet_bridge_parachains; pub type AccountId = u64; -pub type TestNumber = u64; pub type RelayBlockHeader = sp_runtime::generic::Header; @@ -152,42 +153,12 @@ construct_runtime! { } } -parameter_types! { - pub const BlockHashCount: TestNumber = 250; - pub const MaximumBlockWeight: Weight = Weight::from_parts(1024, 0); - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); -} - +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for TestRuntime { - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type RuntimeCall = RuntimeCall; type Block = Block; - type Hash = H256; - type Hashing = RegularParachainHasher; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type DbWeight = (); - type BlockWeights = (); - type BlockLength = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; } parameter_types! { - pub const SessionLength: u64 = 5; - pub const NumValidators: u32 = 5; pub const HeadersToKeep: u32 = 5; } diff --git a/bridges/modules/parachains/src/weights.rs b/bridges/modules/parachains/src/weights.rs index 9182ec46611..abddc876894 100644 --- a/bridges/modules/parachains/src/weights.rs +++ b/bridges/modules/parachains/src/weights.rs @@ -23,7 +23,7 @@ //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/release/millau-bridge-node +// target/release/unknown-bridge-node // benchmark // pallet // --chain=dev @@ -60,29 +60,29 @@ pub trait WeightInfo { /// Those weights are test only and must never be used in production. pub struct BridgeWeight(PhantomData); impl WeightInfo for BridgeWeight { - /// Storage: BridgeRialtoParachains PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), + /// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), /// added: 496, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ParasInfo (r:1 w:1) + /// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: + /// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: /// 555, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHashes (r:1 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHashes (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size: /// Some(64), added: 1549, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHeads (r:0 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHeads (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size: /// Some(196), added: 1681, mode: MaxEncodedLen) /// /// The range of component `p` is `[1, 2]`. 
@@ -97,29 +97,29 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: BridgeRialtoParachains PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), + /// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), /// added: 496, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ParasInfo (r:1 w:1) + /// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: + /// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: /// 555, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHashes (r:1 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHashes (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size: /// Some(64), added: 1549, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHeads (r:0 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHeads (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size: /// Some(196), added: 1681, mode: MaxEncodedLen) fn submit_parachain_heads_with_1kb_proof() -> Weight { // Proof Size summary in bytes: @@ -130,29 +130,29 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: BridgeRialtoParachains PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), + /// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), /// added: 496, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ParasInfo (r:1 w:1) + /// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: + /// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: /// 555, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHashes (r:1 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHashes (max_values: Some(1024), max_size: + /// Proof: 
BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size: /// Some(64), added: 1549, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHeads (r:0 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHeads (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size: /// Some(196), added: 1681, mode: MaxEncodedLen) fn submit_parachain_heads_with_16kb_proof() -> Weight { // Proof Size summary in bytes: @@ -167,29 +167,29 @@ impl WeightInfo for BridgeWeight { // For backwards compatibility and tests impl WeightInfo for () { - /// Storage: BridgeRialtoParachains PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), + /// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), /// added: 496, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ParasInfo (r:1 w:1) + /// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: + /// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: /// 555, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHashes (r:1 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHashes (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size: /// Some(64), added: 1549, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHeads (r:0 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHeads (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size: /// Some(196), added: 1681, mode: MaxEncodedLen) /// /// The range of component `p` is `[1, 2]`. 
@@ -204,29 +204,29 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: BridgeRialtoParachains PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), + /// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), /// added: 496, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ParasInfo (r:1 w:1) + /// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: + /// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: /// 555, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHashes (r:1 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHashes (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size: /// Some(64), added: 1549, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHeads (r:0 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHeads (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size: /// Some(196), added: 1681, mode: MaxEncodedLen) fn submit_parachain_heads_with_1kb_proof() -> Weight { // Proof Size summary in bytes: @@ -237,29 +237,29 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: BridgeRialtoParachains PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), + /// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), /// added: 496, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ParasInfo (r:1 w:1) + /// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: + /// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: /// 555, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHashes (r:1 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHashes (max_values: Some(1024), max_size: + /// Proof: 
BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size: /// Some(64), added: 1549, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHeads (r:0 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHeads (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size: /// Some(196), added: 1681, mode: MaxEncodedLen) fn submit_parachain_heads_with_16kb_proof() -> Weight { // Proof Size summary in bytes: diff --git a/bridges/modules/parachains/src/weights_ext.rs b/bridges/modules/parachains/src/weights_ext.rs index 13bc9ad2bbc..393086a8569 100644 --- a/bridges/modules/parachains/src/weights_ext.rs +++ b/bridges/modules/parachains/src/weights_ext.rs @@ -31,7 +31,7 @@ use frame_support::weights::{RuntimeDbWeight, Weight}; pub const DEFAULT_PARACHAIN_HEAD_SIZE: u32 = 384; /// Number of extra bytes (excluding size of storage value itself) of storage proof, built at -/// the Rialto chain. +/// some generic chain. pub const EXTRA_STORAGE_PROOF_SIZE: u32 = 1024; /// Extended weight info. diff --git a/bridges/modules/relayers/Cargo.toml b/bridges/modules/relayers/Cargo.toml index 10b60c3006b..6ec1971e3f6 100644 --- a/bridges/modules/relayers/Cargo.toml +++ b/bridges/modules/relayers/Cargo.toml @@ -30,7 +30,6 @@ sp-std = { path = "../../../substrate/primitives/std", default-features = false [dev-dependencies] bp-runtime = { path = "../../primitives/runtime" } pallet-balances = { path = "../../../substrate/frame/balances" } -sp-core = { path = "../../../substrate/primitives/core" } sp-io = { path = "../../../substrate/primitives/io" } sp-runtime = { path = "../../../substrate/primitives/runtime" } diff --git a/bridges/modules/relayers/src/mock.rs b/bridges/modules/relayers/src/mock.rs index d19d47eec5c..667b10e5c12 100644 --- a/bridges/modules/relayers/src/mock.rs +++ b/bridges/modules/relayers/src/mock.rs @@ -22,12 +22,10 @@ use bp_messages::LaneId; use bp_relayers::{ PayRewardFromAccount, PaymentProcedure, RewardsAccountOwner, RewardsAccountParams, }; -use frame_support::{parameter_types, traits::fungible::Mutate, weights::RuntimeDbWeight}; -use sp_core::H256; -use sp_runtime::{ - traits::{BlakeTwo256, ConstU32, IdentityLookup}, - BuildStorage, +use frame_support::{ + derive_impl, parameter_types, traits::fungible::Mutate, weights::RuntimeDbWeight, }; +use sp_runtime::BuildStorage; pub type AccountId = u64; pub type Balance = u64; @@ -61,47 +59,17 @@ parameter_types! 
{ pub const Lease: BlockNumber = 8; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for TestRuntime { - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type RuntimeCall = RuntimeCall; type Block = Block; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = frame_support::traits::ConstU64<250>; - type Version = (); - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type BlockWeights = (); - type BlockLength = (); type DbWeight = DbWeight; - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] impl pallet_balances::Config for TestRuntime { - type MaxLocks = (); - type Balance = Balance; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = frame_system::Pallet; - type WeightInfo = (); - type MaxReserves = ConstU32<1>; type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxHolds = ConstU32<0>; - type MaxFreezes = ConstU32<0>; + type AccountStore = System; } impl pallet_bridge_relayers::Config for TestRuntime { diff --git a/bridges/modules/relayers/src/weights.rs b/bridges/modules/relayers/src/weights.rs index 2e064a3936d..c2c065b0c0a 100644 --- a/bridges/modules/relayers/src/weights.rs +++ b/bridges/modules/relayers/src/weights.rs @@ -23,7 +23,7 @@ //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/release/millau-bridge-node +// target/release/rip-bridge-node // benchmark // pallet // --chain=dev diff --git a/bridges/modules/xcm-bridge-hub-router/src/mock.rs b/bridges/modules/xcm-bridge-hub-router/src/mock.rs index 2152b4eb28f..2d173ebc045 100644 --- a/bridges/modules/xcm-bridge-hub-router/src/mock.rs +++ b/bridges/modules/xcm-bridge-hub-router/src/mock.rs @@ -19,13 +19,9 @@ use crate as pallet_xcm_bridge_hub_router; use bp_xcm_bridge_hub_router::XcmChannelStatusProvider; -use frame_support::{construct_runtime, parameter_types}; +use frame_support::{construct_runtime, derive_impl, parameter_types}; use frame_system::EnsureRoot; -use sp_core::H256; -use sp_runtime::{ - traits::{BlakeTwo256, ConstU128, IdentityLookup}, - BuildStorage, -}; +use sp_runtime::{traits::ConstU128, BuildStorage}; use xcm::prelude::*; use xcm_builder::{NetworkExportTable, NetworkExportTableItem}; @@ -64,30 +60,9 @@ parameter_types! 
{ ]; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for TestRuntime { - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type RuntimeCall = RuntimeCall; type Block = Block; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = frame_support::traits::ConstU64<250>; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; } impl pallet_xcm_bridge_hub_router::Config<()> for TestRuntime { diff --git a/bridges/modules/xcm-bridge-hub-router/src/weights.rs b/bridges/modules/xcm-bridge-hub-router/src/weights.rs index 62936e997f3..b0c8fc6252c 100644 --- a/bridges/modules/xcm-bridge-hub-router/src/weights.rs +++ b/bridges/modules/xcm-bridge-hub-router/src/weights.rs @@ -23,7 +23,7 @@ //! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/release/millau-bridge-node +// target/release/rip-bridge-node // benchmark // pallet // --chain=dev diff --git a/bridges/primitives/chain-asset-hub-kusama/Cargo.toml b/bridges/primitives/chain-asset-hub-kusama/Cargo.toml deleted file mode 100644 index 3e53f9407ff..00000000000 --- a/bridges/primitives/chain-asset-hub-kusama/Cargo.toml +++ /dev/null @@ -1,26 +0,0 @@ -[package] -name = "bp-asset-hub-kusama" -description = "Primitives of AssetHubKusama parachain runtime." -version = "0.1.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } - -# Substrate Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } - -# Bridge Dependencies -bp-xcm-bridge-hub-router = { path = "../xcm-bridge-hub-router", default-features = false } - -[features] -default = [ "std" ] -std = [ - "bp-xcm-bridge-hub-router/std", - "codec/std", - "frame-support/std", - "scale-info/std", -] diff --git a/bridges/primitives/chain-asset-hub-kusama/src/lib.rs b/bridges/primitives/chain-asset-hub-kusama/src/lib.rs deleted file mode 100644 index 94016c1da0c..00000000000 --- a/bridges/primitives/chain-asset-hub-kusama/src/lib.rs +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! 
Module with configuration which reflects AssetHubKusama runtime setup. - -#![cfg_attr(not(feature = "std"), no_std)] - -use codec::{Decode, Encode}; -use scale_info::TypeInfo; - -pub use bp_xcm_bridge_hub_router::XcmBridgeHubRouterCall; - -/// `AssetHubKusama` Runtime `Call` enum. -/// -/// The enum represents a subset of possible `Call`s we can send to `AssetHubKusama` chain. -/// Ideally this code would be auto-generated from metadata, because we want to -/// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s. -/// -/// All entries here (like pretty much in the entire file) must be kept in sync with -/// `AssetHubKusama` `construct_runtime`, so that we maintain SCALE-compatibility. -#[allow(clippy::large_enum_variant)] -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -pub enum Call { - /// `ToPolkadotXcmRouter` bridge pallet. - #[codec(index = 43)] - ToPolkadotXcmRouter(XcmBridgeHubRouterCall), -} - -frame_support::parameter_types! { - /// Some sane weight to execute `xcm::Transact(pallet-xcm-bridge-hub-router::Call::report_bridge_status)`. - pub const XcmBridgeHubRouterTransactCallMaxWeight: frame_support::weights::Weight = frame_support::weights::Weight::from_parts(200_000_000, 6144); - - /// Base delivery fee to `BridgeHubKusama`. - /// (initially was calculated `170733333` + `10%` by test `BridgeHubKusama::can_calculate_weight_for_paid_export_message_with_reserve_transfer`) - pub const BridgeHubKusamaBaseFeeInDots: u128 = 187806666; -} diff --git a/bridges/primitives/chain-asset-hub-polkadot/Cargo.toml b/bridges/primitives/chain-asset-hub-polkadot/Cargo.toml deleted file mode 100644 index 9c1b1a1f326..00000000000 --- a/bridges/primitives/chain-asset-hub-polkadot/Cargo.toml +++ /dev/null @@ -1,28 +0,0 @@ -[package] -name = "bp-asset-hub-polkadot" -description = "Primitives of AssetHubPolkadot parachain runtime." -version = "0.1.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } - -# Substrate Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } - -# Bridge Dependencies -bp-xcm-bridge-hub-router = { path = "../xcm-bridge-hub-router", default-features = false } - -[features] -default = [ "std" ] -std = [ - "bp-xcm-bridge-hub-router/std", - "codec/std", - "frame-support/std", - "scale-info/std", - "sp-runtime/std", -] diff --git a/bridges/primitives/chain-asset-hub-polkadot/src/lib.rs b/bridges/primitives/chain-asset-hub-polkadot/src/lib.rs deleted file mode 100644 index 486fba60e1f..00000000000 --- a/bridges/primitives/chain-asset-hub-polkadot/src/lib.rs +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Module with configuration which reflects AssetHubPolkadot runtime setup. - -#![cfg_attr(not(feature = "std"), no_std)] - -use codec::{Decode, Encode}; -use scale_info::TypeInfo; - -pub use bp_xcm_bridge_hub_router::XcmBridgeHubRouterCall; - -/// `AssetHubPolkadot` Runtime `Call` enum. -/// -/// The enum represents a subset of possible `Call`s we can send to `AssetHubPolkadot` chain. -/// Ideally this code would be auto-generated from metadata, because we want to -/// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s. -/// -/// All entries here (like pretty much in the entire file) must be kept in sync with -/// `AssetHubPolkadot` `construct_runtime`, so that we maintain SCALE-compatibility. -#[allow(clippy::large_enum_variant)] -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -pub enum Call { - /// `ToKusamaXcmRouter` bridge pallet. - #[codec(index = 43)] - ToKusamaXcmRouter(XcmBridgeHubRouterCall), -} - -frame_support::parameter_types! { - /// Some sane weight to execute `xcm::Transact(pallet-xcm-bridge-hub-router::Call::report_bridge_status)`. - pub const XcmBridgeHubRouterTransactCallMaxWeight: frame_support::weights::Weight = frame_support::weights::Weight::from_parts(200_000_000, 6144); - - /// Base delivery fee to `BridgeHubPolkadot`. - /// (initially was calculated `51220000` + `10%` by test `BridgeHubPolkadot::can_calculate_weight_for_paid_export_message_with_reserve_transfer`) - pub const BridgeHubPolkadotBaseFeeInDots: u128 = 56342000; -} diff --git a/bridges/primitives/chain-asset-hub-rococo/src/lib.rs b/bridges/primitives/chain-asset-hub-rococo/src/lib.rs index 6216b24d75c..de2e9ae856d 100644 --- a/bridges/primitives/chain-asset-hub-rococo/src/lib.rs +++ b/bridges/primitives/chain-asset-hub-rococo/src/lib.rs @@ -34,9 +34,6 @@ pub use bp_xcm_bridge_hub_router::XcmBridgeHubRouterCall; #[allow(clippy::large_enum_variant)] #[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] pub enum Call { - /// `ToWococoXcmRouter` bridge pallet. - #[codec(index = 43)] - ToWococoXcmRouter(XcmBridgeHubRouterCall), /// `ToWestendXcmRouter` bridge pallet. #[codec(index = 45)] ToWestendXcmRouter(XcmBridgeHubRouterCall), diff --git a/bridges/primitives/chain-asset-hub-wococo/Cargo.toml b/bridges/primitives/chain-asset-hub-wococo/Cargo.toml deleted file mode 100644 index e1a5a262157..00000000000 --- a/bridges/primitives/chain-asset-hub-wococo/Cargo.toml +++ /dev/null @@ -1,26 +0,0 @@ -[package] -name = "bp-asset-hub-wococo" -description = "Primitives of AssetHubWococo parachain runtime." 
-version = "0.1.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } - -# Substrate Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } - -# Bridge Dependencies -bp-xcm-bridge-hub-router = { path = "../xcm-bridge-hub-router", default-features = false } - -[features] -default = [ "std" ] -std = [ - "bp-xcm-bridge-hub-router/std", - "codec/std", - "frame-support/std", - "scale-info/std", -] diff --git a/bridges/primitives/chain-asset-hub-wococo/src/lib.rs b/bridges/primitives/chain-asset-hub-wococo/src/lib.rs deleted file mode 100644 index c04eb04cce7..00000000000 --- a/bridges/primitives/chain-asset-hub-wococo/src/lib.rs +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Module with configuration which reflects AssetHubWococo runtime setup. - -#![cfg_attr(not(feature = "std"), no_std)] - -use codec::{Decode, Encode}; -use scale_info::TypeInfo; - -pub use bp_xcm_bridge_hub_router::XcmBridgeHubRouterCall; - -/// `AssetHubWococo` Runtime `Call` enum. -/// -/// The enum represents a subset of possible `Call`s we can send to `AssetHubWococo` chain. -/// Ideally this code would be auto-generated from metadata, because we want to -/// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s. -/// -/// All entries here (like pretty much in the entire file) must be kept in sync with -/// `AssetHubWococo` `construct_runtime`, so that we maintain SCALE-compatibility. -#[allow(clippy::large_enum_variant)] -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -pub enum Call { - /// `ToRococoXcmRouter` bridge pallet. - #[codec(index = 44)] - ToRococoXcmRouter(XcmBridgeHubRouterCall), -} - -frame_support::parameter_types! { - /// Some sane weight to execute `xcm::Transact(pallet-xcm-bridge-hub-router::Call::report_bridge_status)`. - pub const XcmBridgeHubRouterTransactCallMaxWeight: frame_support::weights::Weight = frame_support::weights::Weight::from_parts(200_000_000, 6144); -} - -/// Identifier of AssetHubWococo in the Wococo relay chain. -pub const ASSET_HUB_WOCOCO_PARACHAIN_ID: u32 = 1000; diff --git a/bridges/primitives/chain-bridge-hub-cumulus/Cargo.toml b/bridges/primitives/chain-bridge-hub-cumulus/Cargo.toml index 24cf7236d45..46697913674 100644 --- a/bridges/primitives/chain-bridge-hub-cumulus/Cargo.toml +++ b/bridges/primitives/chain-bridge-hub-cumulus/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bp-bridge-hub-cumulus" -description = "Primitives of BridgeHubRococo parachain runtime." 
+description = "Primitives for BridgeHub parachain runtimes." version = "0.1.0" authors.workspace = true edition.workspace = true diff --git a/bridges/primitives/chain-bridge-hub-kusama/Cargo.toml b/bridges/primitives/chain-bridge-hub-kusama/Cargo.toml index 387f5e8ade6..c4cd229ef43 100644 --- a/bridges/primitives/chain-bridge-hub-kusama/Cargo.toml +++ b/bridges/primitives/chain-bridge-hub-kusama/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bp-bridge-hub-kusama" -description = "Primitives of BridgeHubRococo parachain runtime." +description = "Primitives of BridgeHubKusama parachain runtime." version = "0.1.0" authors.workspace = true edition.workspace = true diff --git a/bridges/primitives/chain-bridge-hub-polkadot/Cargo.toml b/bridges/primitives/chain-bridge-hub-polkadot/Cargo.toml index 40b386e22d2..4913d87e5fb 100644 --- a/bridges/primitives/chain-bridge-hub-polkadot/Cargo.toml +++ b/bridges/primitives/chain-bridge-hub-polkadot/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bp-bridge-hub-polkadot" -description = "Primitives of BridgeHubWococo parachain runtime." +description = "Primitives of BridgeHubPolkadot parachain runtime." version = "0.1.0" authors.workspace = true edition.workspace = true diff --git a/bridges/primitives/chain-bridge-hub-rococo/src/lib.rs b/bridges/primitives/chain-bridge-hub-rococo/src/lib.rs index e72e711de92..59d293edf1c 100644 --- a/bridges/primitives/chain-bridge-hub-rococo/src/lib.rs +++ b/bridges/primitives/chain-bridge-hub-rococo/src/lib.rs @@ -74,9 +74,6 @@ pub const WITH_BRIDGE_HUB_ROCOCO_MESSAGES_PALLET_NAME: &str = "BridgeRococoMessa /// chains. pub const WITH_BRIDGE_HUB_ROCOCO_RELAYERS_PALLET_NAME: &str = "BridgeRelayers"; -/// Pallet index of `BridgeWococoMessages: pallet_bridge_messages::`. -pub const WITH_BRIDGE_ROCOCO_TO_WOCOCO_MESSAGES_PALLET_INDEX: u8 = 46; - /// Pallet index of `BridgeWestendMessages: pallet_bridge_messages::`. pub const WITH_BRIDGE_ROCOCO_TO_WESTEND_MESSAGES_PALLET_INDEX: u8 = 51; diff --git a/bridges/primitives/chain-bridge-hub-wococo/Cargo.toml b/bridges/primitives/chain-bridge-hub-wococo/Cargo.toml deleted file mode 100644 index 17c134f4412..00000000000 --- a/bridges/primitives/chain-bridge-hub-wococo/Cargo.toml +++ /dev/null @@ -1,34 +0,0 @@ -[package] -name = "bp-bridge-hub-wococo" -description = "Primitives of BridgeHubWococo parachain runtime." 
-version = "0.1.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] - -# Bridge Dependencies - -bp-bridge-hub-cumulus = { path = "../chain-bridge-hub-cumulus", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } -bp-messages = { path = "../messages", default-features = false } - -# Substrate Based Dependencies - -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } - -[features] -default = [ "std" ] -std = [ - "bp-bridge-hub-cumulus/std", - "bp-messages/std", - "bp-runtime/std", - "frame-support/std", - "sp-api/std", - "sp-runtime/std", - "sp-std/std", -] diff --git a/bridges/primitives/chain-bridge-hub-wococo/src/lib.rs b/bridges/primitives/chain-bridge-hub-wococo/src/lib.rs deleted file mode 100644 index c8bd397cec5..00000000000 --- a/bridges/primitives/chain-bridge-hub-wococo/src/lib.rs +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Module with configuration which reflects BridgeHubWococo runtime setup -//! (AccountId, Headers, Hashes...) - -#![cfg_attr(not(feature = "std"), no_std)] - -pub use bp_bridge_hub_cumulus::*; -use bp_messages::*; -use bp_runtime::{ - decl_bridge_finality_runtime_apis, decl_bridge_messages_runtime_apis, Chain, Parachain, -}; -use frame_support::dispatch::DispatchClass; -use sp_runtime::RuntimeDebug; - -/// BridgeHubWococo parachain. -#[derive(RuntimeDebug)] -pub struct BridgeHubWococo; - -impl Chain for BridgeHubWococo { - type BlockNumber = BlockNumber; - type Hash = Hash; - type Hasher = Hasher; - type Header = Header; - - type AccountId = AccountId; - type Balance = Balance; - type Nonce = Nonce; - type Signature = Signature; - - fn max_extrinsic_size() -> u32 { - *BlockLength::get().max.get(DispatchClass::Normal) - } - - fn max_extrinsic_weight() -> Weight { - BlockWeights::get() - .get(DispatchClass::Normal) - .max_extrinsic - .unwrap_or(Weight::MAX) - } -} - -impl Parachain for BridgeHubWococo { - const PARACHAIN_ID: u32 = BRIDGE_HUB_WOCOCO_PARACHAIN_ID; -} - -/// Identifier of BridgeHubWococo in the Wococo relay chain. -pub const BRIDGE_HUB_WOCOCO_PARACHAIN_ID: u32 = 1014; - -/// Name of the With-BridgeHubWococo messages pallet instance that is deployed at bridged chains. -pub const WITH_BRIDGE_HUB_WOCOCO_MESSAGES_PALLET_NAME: &str = "BridgeWococoMessages"; - -/// Name of the With-BridgeHubWococo bridge-relayers pallet instance that is deployed at bridged -/// chains. 
-pub const WITH_BRIDGE_HUB_WOCOCO_RELAYERS_PALLET_NAME: &str = "BridgeRelayers"; - -/// Pallet index of `BridgeRococoMessages: pallet_bridge_messages::`. -pub const WITH_BRIDGE_WOCOCO_TO_ROCOCO_MESSAGES_PALLET_INDEX: u8 = 45; - -decl_bridge_finality_runtime_apis!(bridge_hub_wococo); -decl_bridge_messages_runtime_apis!(bridge_hub_wococo); - -frame_support::parameter_types! { - /// The XCM fee that is paid for executing XCM program (with `ExportMessage` instruction) at the Wococo - /// BridgeHub. - /// (initially was calculated by test `BridgeHubWococo::can_calculate_weight_for_paid_export_message_with_reserve_transfer` + `33%`) - pub const BridgeHubWococoBaseXcmFeeInWocs: u128 = 1624803349; - - /// Transaction fee that is paid at the Wococo BridgeHub for delivering single inbound message. - /// (initially was calculated by test `BridgeHubWococo::can_calculate_fee_for_complex_message_delivery_transaction` + `33%`) - pub const BridgeHubWococoBaseDeliveryFeeInWocs: u128 = 6417262881; - - /// Transaction fee that is paid at the Wococo BridgeHub for delivering single outbound message confirmation. - /// (initially was calculated by test `BridgeHubWococo::can_calculate_fee_for_complex_message_confirmation_transaction` + `33%`) - pub const BridgeHubWococoBaseConfirmationFeeInWocs: u128 = 6159996668; -} diff --git a/bridges/primitives/chain-wococo/Cargo.toml b/bridges/primitives/chain-wococo/Cargo.toml deleted file mode 100644 index 05901821b36..00000000000 --- a/bridges/primitives/chain-wococo/Cargo.toml +++ /dev/null @@ -1,34 +0,0 @@ -[package] -name = "bp-wococo" -description = "Primitives of Wococo runtime." -version = "0.1.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] - -# Bridge Dependencies - -bp-header-chain = { path = "../header-chain", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } -bp-rococo = { path = "../chain-rococo", default-features = false } - -# Substrate Based Dependencies - -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } - -[features] -default = [ "std" ] -std = [ - "bp-header-chain/std", - "bp-polkadot-core/std", - "bp-rococo/std", - "bp-runtime/std", - "frame-support/std", - "sp-api/std", - "sp-std/std", -] diff --git a/bridges/primitives/chain-wococo/src/lib.rs b/bridges/primitives/chain-wococo/src/lib.rs deleted file mode 100644 index b1df65630be..00000000000 --- a/bridges/primitives/chain-wococo/src/lib.rs +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -#![cfg_attr(not(feature = "std"), no_std)] -// RuntimeApi generated functions -#![allow(clippy::too_many_arguments)] - -pub use bp_polkadot_core::*; -pub use bp_rococo::{ - SS58Prefix, MAX_AUTHORITIES_COUNT, MAX_NESTED_PARACHAIN_HEAD_DATA_SIZE, PARAS_PALLET_NAME, -}; - -use bp_header_chain::ChainWithGrandpa; -use bp_runtime::{decl_bridge_finality_runtime_apis, Chain}; -use frame_support::weights::Weight; - -/// Wococo Chain -pub struct Wococo; - -impl Chain for Wococo { - type BlockNumber = ::BlockNumber; - type Hash = ::Hash; - type Hasher = ::Hasher; - type Header = ::Header; - - type AccountId = ::AccountId; - type Balance = ::Balance; - type Nonce = ::Nonce; - type Signature = ::Signature; - - fn max_extrinsic_size() -> u32 { - PolkadotLike::max_extrinsic_size() - } - - fn max_extrinsic_weight() -> Weight { - PolkadotLike::max_extrinsic_weight() - } -} - -impl ChainWithGrandpa for Wococo { - const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = WITH_WOCOCO_GRANDPA_PALLET_NAME; - const MAX_AUTHORITIES_COUNT: u32 = MAX_AUTHORITIES_COUNT; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = - REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; - const MAX_HEADER_SIZE: u32 = MAX_HEADER_SIZE; - const AVERAGE_HEADER_SIZE_IN_JUSTIFICATION: u32 = AVERAGE_HEADER_SIZE_IN_JUSTIFICATION; -} - -// The SignedExtension used by Wococo. -pub use bp_rococo::CommonSignedExtension as SignedExtension; - -/// Name of the With-Wococo GRANDPA pallet instance that is deployed at bridged chains. -pub const WITH_WOCOCO_GRANDPA_PALLET_NAME: &str = "BridgeWococoGrandpa"; - -decl_bridge_finality_runtime_apis!(wococo, grandpa); diff --git a/bridges/primitives/runtime/src/chain.rs b/bridges/primitives/runtime/src/chain.rs index e1809e14524..b78023efb1b 100644 --- a/bridges/primitives/runtime/src/chain.rs +++ b/bridges/primitives/runtime/src/chain.rs @@ -280,7 +280,7 @@ pub type TransactionEraOf = crate::TransactionEra, HashOf /// - constants that are stringified names of runtime API methods: /// - `BEST_FINALIZED__HEADER_METHOD` /// - `_ACCEPTED__FINALITY_PROOFS_METHOD` -/// The name of the chain has to be specified in snake case (e.g. `rialto_parachain`). +/// The name of the chain has to be specified in snake case (e.g. `bridge_hub_polkadot`). #[macro_export] macro_rules! decl_bridge_finality_runtime_apis { ($chain: ident $(, $consensus: ident => $justification_type: ty)?) => { @@ -332,7 +332,7 @@ macro_rules! decl_bridge_finality_runtime_apis { /// - `FromInboundLaneApi` /// - constants that are stringified names of runtime API methods: /// - `FROM__MESSAGE_DETAILS_METHOD`, -/// The name of the chain has to be specified in snake case (e.g. `rialto_parachain`). +/// The name of the chain has to be specified in snake case (e.g. `bridge_hub_polkadot`). #[macro_export] macro_rules! decl_bridge_messages_runtime_apis { ($chain: ident) => { @@ -390,7 +390,7 @@ macro_rules! decl_bridge_messages_runtime_apis { /// Convenience macro that declares bridge finality runtime apis, bridge messages runtime apis /// and related constants for a chain. -/// The name of the chain has to be specified in snake case (e.g. `rialto_parachain`). +/// The name of the chain has to be specified in snake case (e.g. `bridge_hub_polkadot`). #[macro_export] macro_rules! decl_bridge_runtime_apis { ($chain: ident $(, $consensus: ident)?) 
=> { diff --git a/bridges/primitives/runtime/src/extensions.rs b/bridges/primitives/runtime/src/extensions.rs index 44eeaad93c9..8a618721b23 100644 --- a/bridges/primitives/runtime/src/extensions.rs +++ b/bridges/primitives/runtime/src/extensions.rs @@ -88,7 +88,7 @@ pub type BridgeRejectObsoleteHeadersAndMessages = GenericSignedExtensionSchema<( /// wildcard/placeholder, which relies on the scale encoding for `()` or `((), ())`, or `((), (), /// ())` is the same. So runtime can contains any kind of tuple: /// `(BridgeRefundBridgeHubRococoMessages)` -/// `(BridgeRefundBridgeHubRococoMessages, BridgeRefundBridgeHubWococoMessages)` +/// `(BridgeRefundBridgeHubRococoMessages, BridgeRefundBridgeHubWestendMessages)` /// `(BridgeRefundParachainMessages1, ..., BridgeRefundParachainMessagesN)` pub type RefundBridgedParachainMessagesSchema = GenericSignedExtensionSchema<(), ()>; diff --git a/bridges/primitives/runtime/src/lib.rs b/bridges/primitives/runtime/src/lib.rs index e5277d8db6a..0513cfa2a6c 100644 --- a/bridges/primitives/runtime/src/lib.rs +++ b/bridges/primitives/runtime/src/lib.rs @@ -61,15 +61,6 @@ pub use sp_runtime::paste; /// Use this when something must be shared among all instances. pub const NO_INSTANCE_ID: ChainId = [0, 0, 0, 0]; -/// Rialto chain id. -pub const RIALTO_CHAIN_ID: ChainId = *b"rlto"; - -/// RialtoParachain chain id. -pub const RIALTO_PARACHAIN_CHAIN_ID: ChainId = *b"rlpa"; - -/// Millau chain id. -pub const MILLAU_CHAIN_ID: ChainId = *b"mlau"; - /// Polkadot chain id. pub const POLKADOT_CHAIN_ID: ChainId = *b"pdot"; @@ -88,15 +79,9 @@ pub const ASSET_HUB_WESTEND_CHAIN_ID: ChainId = *b"ahwe"; /// Rococo chain id. pub const ROCOCO_CHAIN_ID: ChainId = *b"roco"; -/// Wococo chain id. -pub const WOCOCO_CHAIN_ID: ChainId = *b"woco"; - /// BridgeHubRococo chain id. pub const BRIDGE_HUB_ROCOCO_CHAIN_ID: ChainId = *b"bhro"; -/// BridgeHubWococo chain id. -pub const BRIDGE_HUB_WOCOCO_CHAIN_ID: ChainId = *b"bhwo"; - /// BridgeHubWestend chain id. pub const BRIDGE_HUB_WESTEND_CHAIN_ID: ChainId = *b"bhwd"; @@ -277,18 +262,6 @@ pub fn storage_map_final_key( StorageKey(final_key) } -/// This is how a storage key of storage parameter (`parameter_types! { storage Param: bool = false; -/// }`) is computed. -/// -/// Copied from `frame_support::parameter_types` macro. -pub fn storage_parameter_key(parameter_name: &str) -> StorageKey { - let mut buffer = Vec::with_capacity(1 + parameter_name.len() + 1); - buffer.push(b':'); - buffer.extend_from_slice(parameter_name.as_bytes()); - buffer.push(b':'); - StorageKey(sp_io::hashing::twox_128(&buffer).to_vec()) -} - /// This is how a storage key of storage value is computed. /// /// Copied from `frame_support::storage::storage_prefix`. 
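A quick aside on the `RefundBridgedParachainMessagesSchema` comment in the `extensions.rs` hunk above: the single schema can stand in for `(BridgeRefundParachainMessages1, ..., BridgeRefundParachainMessagesN)` only because SCALE encodes `()`, and any tuple made purely of `()`s, to the same empty byte string, so every such signed-extension payload is byte-identical. A minimal sketch of that property, assuming the `parity-scale-codec` crate under its usual `codec` alias:

```rust
use codec::Encode; // parity-scale-codec

fn main() {
    // `()` encodes to nothing, and a tuple encodes as the concatenation of its
    // fields, so `()`, `((), ())` and `((), (), ())` all produce the same
    // (empty) encoding; that is what lets one schema act as a wildcard for any
    // number of refund extensions.
    assert_eq!(().encode(), Vec::<u8>::new());
    assert_eq!(((), ()).encode(), Vec::<u8>::new());
    assert_eq!(((), (), ()).encode(), Vec::<u8>::new());
}
```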
@@ -574,14 +547,6 @@ where mod tests { use super::*; - #[test] - fn storage_parameter_key_works() { - assert_eq!( - storage_parameter_key("MillauToRialtoConversionRate"), - StorageKey(hex_literal::hex!("58942375551bb0af1682f72786b59d04").to_vec()), - ); - } - #[test] fn storage_value_key_works() { assert_eq!( diff --git a/bridges/scripts/verify-pallets-build.sh b/bridges/scripts/verify-pallets-build.sh index e797f77d026..b96bbf1833b 100755 --- a/bridges/scripts/verify-pallets-build.sh +++ b/bridges/scripts/verify-pallets-build.sh @@ -61,19 +61,12 @@ trap revert_to_clean_state EXIT rm -rf $BRIDGES_FOLDER/.config rm -rf $BRIDGES_FOLDER/.github rm -rf $BRIDGES_FOLDER/.maintain -rm -rf $BRIDGES_FOLDER/bin/millau -rm -rf $BRIDGES_FOLDER/bin/rialto -rm -rf $BRIDGES_FOLDER/bin/rialto-parachain -rm -rf $BRIDGES_FOLDER/bin/.keep rm -rf $BRIDGES_FOLDER/deployments rm -f $BRIDGES_FOLDER/docs/dockerhub-* rm -rf $BRIDGES_FOLDER/fuzz rm -rf $BRIDGES_FOLDER/modules/beefy rm -rf $BRIDGES_FOLDER/modules/shift-session-manager rm -rf $BRIDGES_FOLDER/primitives/beefy -rm -rf $BRIDGES_FOLDER/primitives/chain-millau -rm -rf $BRIDGES_FOLDER/primitives/chain-rialto -rm -rf $BRIDGES_FOLDER/primitives/chain-rialto-parachain rm -rf $BRIDGES_FOLDER/relays rm -rf $BRIDGES_FOLDER/scripts/add_license.sh rm -rf $BRIDGES_FOLDER/scripts/build-containers.sh @@ -81,8 +74,6 @@ rm -rf $BRIDGES_FOLDER/scripts/ci-cache.sh rm -rf $BRIDGES_FOLDER/scripts/dump-logs.sh rm -rf $BRIDGES_FOLDER/scripts/license_header rm -rf $BRIDGES_FOLDER/scripts/regenerate_runtimes.sh -rm -rf $BRIDGES_FOLDER/scripts/send-message-from-millau-rialto.sh -rm -rf $BRIDGES_FOLDER/scripts/send-message-from-rialto-millau.sh rm -rf $BRIDGES_FOLDER/scripts/update-weights.sh rm -rf $BRIDGES_FOLDER/scripts/update-weights-setup.sh rm -rf $BRIDGES_FOLDER/scripts/update_substrate.sh diff --git a/bridges/zombienet/README.md b/bridges/zombienet/README.md index 7f7de770814..b601154b624 100644 --- a/bridges/zombienet/README.md +++ b/bridges/zombienet/README.md @@ -1,4 +1,4 @@ -# Bridges Tests for Local Rococo <> Wococo Bridge +# Bridges Tests for Local Rococo <> Westend Bridge This folder contains [zombienet](https://github.com/paritytech/zombienet/) based integration tests for both onchain and offchain bridges code. 
Due to some @@ -9,7 +9,7 @@ To start those tests, you need to: - download latest [zombienet release](https://github.com/paritytech/zombienet/releases); -- build Polkadot binary by running `cargo build -p polkadot --release` command in the +- build Polkadot binary by running `cargo build -p polkadot --release --features fast-runtime` command in the [`polkadot-sdk`](https://github.com/paritytech/polkadot-sdk) repository clone; - build Polkadot Parachain binary by running `cargo build -p polkadot-parachain-bin --release` command in the diff --git a/bridges/zombienet/helpers/wait-hrmp-channel-opened.js b/bridges/zombienet/helpers/wait-hrmp-channel-opened.js new file mode 100644 index 00000000000..e700cab1d74 --- /dev/null +++ b/bridges/zombienet/helpers/wait-hrmp-channel-opened.js @@ -0,0 +1,22 @@ +async function run(nodeName, networkInfo, args) { + const {wsUri, userDefinedTypes} = networkInfo.nodesByName[nodeName]; + const api = await zombie.connect(wsUri, userDefinedTypes); + + const sibling = args[0]; + + while (true) { + const messagingStateAsObj = await api.query.parachainSystem.relevantMessagingState(); + const messagingState = api.createType("Option", messagingStateAsObj); + if (messagingState.isSome) { + const egressChannels = messagingState.unwrap().egressChannels; + if (egressChannels.find(x => x[0] == sibling)) { + return; + } + } + + // else sleep and retry + await new Promise((resolve) => setTimeout(resolve, 12000)); + } +} + +module.exports = { run } diff --git a/bridges/zombienet/run-tests.sh b/bridges/zombienet/run-tests.sh index 1fdbc6b8d61..22fefd09360 100755 --- a/bridges/zombienet/run-tests.sh +++ b/bridges/zombienet/run-tests.sh @@ -11,11 +11,11 @@ export BRIDGE_TESTS_FOLDER=$POLKADOT_SDK_FOLDER/bridges/zombienet/tests export POLKADOT_BINARY_PATH=$POLKADOT_SDK_FOLDER/target/release/polkadot export POLKADOT_PARACHAIN_BINARY_PATH=$POLKADOT_SDK_FOLDER/target/release/polkadot-parachain export POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_ROCOCO=$POLKADOT_PARACHAIN_BINARY_PATH -export POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_WOCOCO=$POLKADOT_PARACHAIN_BINARY_PATH +export POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_WESTEND=$POLKADOT_PARACHAIN_BINARY_PATH export ZOMBIENET_BINARY_PATH=~/local_bridge_testing/bin/zombienet-linux # bridge configuration -export LANE_ID="00000001" +export LANE_ID="00000002" # tests configuration ALL_TESTS_FOLDER=`mktemp -d` diff --git a/bridges/zombienet/scripts/invoke-script.sh b/bridges/zombienet/scripts/invoke-script.sh index cb21d61ab91..6a3754a8824 100755 --- a/bridges/zombienet/scripts/invoke-script.sh +++ b/bridges/zombienet/scripts/invoke-script.sh @@ -1,5 +1,5 @@ #!/bin/bash pushd $POLKADOT_SDK_FOLDER/cumulus/scripts -./bridges_rococo_wococo.sh $1 +./bridges_rococo_westend.sh $1 popd diff --git a/bridges/zombienet/tests/0001-asset-transfer-works-rococo-to-westend.zndsl b/bridges/zombienet/tests/0001-asset-transfer-works-rococo-to-westend.zndsl new file mode 100644 index 00000000000..f68d658cdac --- /dev/null +++ b/bridges/zombienet/tests/0001-asset-transfer-works-rococo-to-westend.zndsl @@ -0,0 +1,26 @@ +Description: User is able to transfer ROC from Rococo Asset Hub to Westend Asset Hub +Network: ../../../cumulus/zombienet/bridge-hubs/bridge_hub_westend_local_network.toml +Creds: config + +# step 1: initialize Westend asset hub +asset-hub-westend-collator1: run ../scripts/invoke-script.sh with "init-asset-hub-westend-local" within 240 seconds +asset-hub-westend-collator1: js-script ../helpers/wait-hrmp-channel-opened.js with "1002" 
within 400 seconds + +# step 2: initialize Westend bridge hub +bridge-hub-westend-collator1: run ../scripts/invoke-script.sh with "init-bridge-hub-westend-local" within 120 seconds + +# step 3: relay is started elsewhere - let's wait until with-Rococo GRANPDA pallet is initialized at Westend +bridge-hub-westend-collator1: js-script ../helpers/best-finalized-header-at-bridged-chain.js with "Rococo,0" within 400 seconds + +# step 2: send WOC to Rococo +asset-hub-westend-collator1: run ../scripts/invoke-script.sh with "reserve-transfer-assets-from-asset-hub-westend-local" within 60 seconds + +# step 3: elsewhere Rococo has sent ROC to //Alice - let's wait for it +asset-hub-westend-collator1: js-script ../helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,0,Rococo" within 600 seconds + +# step 4: check that the relayer //Charlie is rewarded by both our AH and target AH +bridge-hub-westend-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x6268726f,BridgedChain,0" within 300 seconds +bridge-hub-westend-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x6268726F,ThisChain,0" within 300 seconds + +# wait until other network test has completed OR exit with an error too +asset-hub-westend-collator1: run ../scripts/sync-exit.sh within 600 seconds diff --git a/bridges/zombienet/tests/0001-asset-transfer-works-rococo-to-wococo.zndsl b/bridges/zombienet/tests/0001-asset-transfer-works-rococo-to-wococo.zndsl deleted file mode 100644 index a1af2625c1c..00000000000 --- a/bridges/zombienet/tests/0001-asset-transfer-works-rococo-to-wococo.zndsl +++ /dev/null @@ -1,25 +0,0 @@ -Description: User is able to transfer ROC from Rococo Asset Hub to Wococo Asset Hub -Network: ../../../cumulus/zombienet/bridge-hubs/bridge_hub_wococo_local_network.toml -Creds: config - -# step 1: initialize Wococo asset hub -asset-hub-wococo-collator1: run ../scripts/invoke-script.sh with "init-asset-hub-wococo-local" within 120 seconds - -# step 2: initialize Wococo bridge hub -bridge-hub-wococo-collator1: run ../scripts/invoke-script.sh with "init-bridge-hub-wococo-local" within 120 seconds - -# step 3: relay is started elsewhere - let's wait until with-Rococo GRANPDA pallet is initialized at Wococo -bridge-hub-wococo-collator1: js-script ../helpers/best-finalized-header-at-bridged-chain.js with "Rococo,0" within 400 seconds - -# step 2: send WOC to Rococo -asset-hub-wococo-collator1: run ../scripts/invoke-script.sh with "reserve-transfer-assets-from-asset-hub-wococo-local" within 60 seconds - -# step 3: elsewhere Rococo has sent ROC to //Alice - let's wait for it -asset-hub-wococo-collator1: js-script ../helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,0,Rococo" within 600 seconds - -# step 4: check that the relayer //Charlie is rewarded by both our AH and target AH -bridge-hub-wococo-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000001,0x6268726F,BridgedChain,0" within 300 seconds -bridge-hub-wococo-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000001,0x6268726F,ThisChain,0" within 300 seconds - -# wait until other network test has completed OR exit with an error too -asset-hub-wococo-collator1: run ../scripts/sync-exit.sh within 600 seconds diff --git 
a/bridges/zombienet/tests/0001-asset-transfer-works-wococo-to-rococo.zndsl b/bridges/zombienet/tests/0001-asset-transfer-works-westend-to-rococo.zndsl similarity index 64% rename from bridges/zombienet/tests/0001-asset-transfer-works-wococo-to-rococo.zndsl rename to bridges/zombienet/tests/0001-asset-transfer-works-westend-to-rococo.zndsl index ad2446d58ce..c862fa6d176 100644 --- a/bridges/zombienet/tests/0001-asset-transfer-works-wococo-to-rococo.zndsl +++ b/bridges/zombienet/tests/0001-asset-transfer-works-westend-to-rococo.zndsl @@ -1,25 +1,26 @@ -Description: User is able to transfer WOC from Wococo Asset Hub to Rococo Asset Hub +Description: User is able to transfer WOC from Westend Asset Hub to Rococo Asset Hub Network: ../../../cumulus/zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml Creds: config # step 1: initialize Rococo asset hub -asset-hub-rococo-collator1: run ../scripts/invoke-script.sh with "init-asset-hub-rococo-local" within 120 seconds +asset-hub-rococo-collator1: run ../scripts/invoke-script.sh with "init-asset-hub-rococo-local" within 240 seconds +asset-hub-rococo-collator1: js-script ../helpers/wait-hrmp-channel-opened.js with "1013" within 400 seconds # step 2: initialize Rococo bridge hub bridge-hub-rococo-collator1: run ../scripts/invoke-script.sh with "init-bridge-hub-rococo-local" within 120 seconds -# step 3: relay is started elsewhere - let's wait until with-Wococo GRANPDA pallet is initialized at Rococo -bridge-hub-rococo-collator1: js-script ../helpers/best-finalized-header-at-bridged-chain.js with "Wococo,0" within 400 seconds +# step 3: relay is started elsewhere - let's wait until with-Westend GRANPDA pallet is initialized at Rococo +bridge-hub-rococo-collator1: js-script ../helpers/best-finalized-header-at-bridged-chain.js with "Westend,0" within 400 seconds -# step 4: send ROC to Wococo +# step 4: send ROC to Westend asset-hub-rococo-collator1: run ../scripts/invoke-script.sh with "reserve-transfer-assets-from-asset-hub-rococo-local" within 60 seconds -# step 5: elsewhere Wococo has sent WOC to //Alice - let's wait for it -asset-hub-rococo-collator1: js-script ../helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,0,Wococo" within 600 seconds +# step 5: elsewhere Westend has sent WOC to //Alice - let's wait for it +asset-hub-rococo-collator1: js-script ../helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,0,Westend" within 600 seconds # step 6: check that the relayer //Charlie is rewarded by both our AH and target AH -bridge-hub-rococo-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000001,0x6268776F,BridgedChain,0" within 300 seconds -bridge-hub-rococo-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000001,0x6268776F,ThisChain,0" within 300 seconds +bridge-hub-rococo-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x62687764,BridgedChain,0" within 300 seconds +bridge-hub-rococo-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x62687764,ThisChain,0" within 300 seconds # wait until other network test has completed OR exit with an error too asset-hub-rococo-collator1: run ../scripts/sync-exit.sh within 600 seconds diff --git a/bridges/zombienet/tests/0001-start-relay.sh b/bridges/zombienet/tests/0001-start-relay.sh 
old mode 100644 new mode 100755 index fc231fba895..7be2cf4d593 --- a/bridges/zombienet/tests/0001-start-relay.sh +++ b/bridges/zombienet/tests/0001-start-relay.sh @@ -1,5 +1,5 @@ #!/bin/bash pushd $POLKADOT_SDK_FOLDER/cumulus/scripts -./bridges_rococo_wococo.sh run-relay +./bridges_rococo_westend.sh run-relay popd diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/genesis.rs index 4af84c82e98..fa9a287adf8 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/genesis.rs @@ -56,19 +56,11 @@ pub fn genesis() -> Storage { safe_xcm_version: Some(SAFE_XCM_VERSION), ..Default::default() }, - bridge_wococo_grandpa: bridge_hub_rococo_runtime::BridgeWococoGrandpaConfig { + bridge_westend_grandpa: bridge_hub_rococo_runtime::BridgeWestendGrandpaConfig { owner: Some(get_account_id_from_seed::(accounts::BOB)), ..Default::default() }, - bridge_rococo_grandpa: bridge_hub_rococo_runtime::BridgeRococoGrandpaConfig { - owner: Some(get_account_id_from_seed::(accounts::BOB)), - ..Default::default() - }, - bridge_rococo_messages: bridge_hub_rococo_runtime::BridgeRococoMessagesConfig { - owner: Some(get_account_id_from_seed::(accounts::BOB)), - ..Default::default() - }, - bridge_wococo_messages: bridge_hub_rococo_runtime::BridgeWococoMessagesConfig { + bridge_westend_messages: bridge_hub_rococo_runtime::BridgeWestendMessagesConfig { owner: Some(get_account_id_from_seed::(accounts::BOB)), ..Default::default() }, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml index f4f33677d4a..0773eb0c8eb 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml @@ -86,9 +86,7 @@ assets-common = { path = "../common", default-features = false } pallet-xcm-bridge-hub-router = { path = "../../../../../bridges/modules/xcm-bridge-hub-router", default-features = false } bp-asset-hub-rococo = { path = "../../../../../bridges/primitives/chain-asset-hub-rococo", default-features = false } bp-asset-hub-westend = { path = "../../../../../bridges/primitives/chain-asset-hub-westend", default-features = false } -bp-asset-hub-wococo = { path = "../../../../../bridges/primitives/chain-asset-hub-wococo", default-features = false } bp-bridge-hub-rococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-rococo", default-features = false } -bp-bridge-hub-wococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-wococo", default-features = false } bp-bridge-hub-westend = { path = "../../../../../bridges/primitives/chain-bridge-hub-westend", default-features = false } [dev-dependencies] @@ -180,10 +178,8 @@ std = [ "assets-common/std", "bp-asset-hub-rococo/std", "bp-asset-hub-westend/std", - "bp-asset-hub-wococo/std", "bp-bridge-hub-rococo/std", "bp-bridge-hub-westend/std", - "bp-bridge-hub-wococo/std", "codec/std", "cumulus-pallet-aura-ext/std", "cumulus-pallet-dmp-queue/std", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 06dcfb99a65..4492971566b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ 
b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -16,12 +16,6 @@ //! # Asset Hub Rococo Runtime //! //! Asset Hub Rococo, formerly known as "Rockmine", is the test network for its Kusama cousin. -//! -//! This runtime is also used for Asset Hub Wococo. But we dont want to create another exact copy of -//! Asset Hub Rococo, so we injected some tweaks backed by `RuntimeFlavor` and `pub storage Flavor: -//! RuntimeFlavor`. (For example this is needed for successful asset transfer between Asset Hub -//! Rococo and Asset Hub Wococo, where we need to have correct `xcm_config::UniversalLocation` with -//! correct `GlobalConsensus`. #![cfg_attr(not(feature = "std"), no_std)] #![recursion_limit = "256"] @@ -106,15 +100,6 @@ use crate::xcm_config::{ }; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; -/// Enum for handling differences in the runtime configuration for `AssetHubRococo` vs. -/// `AssetHubWococo`. -#[derive(Default, Eq, PartialEq, Debug, Clone, Copy, Decode, Encode)] -pub enum RuntimeFlavor { - #[default] - Rococo, - Wococo, -} - impl_opaque_keys! { pub struct SessionKeys { pub aura: Aura, @@ -866,73 +851,11 @@ impl pallet_nfts::Config for Runtime { type Helper = (); } -/// XCM router instance to BridgeHub with bridging capabilities for `Wococo` global -/// consensus with dynamic fees and back-pressure. -pub type ToWococoXcmRouterInstance = pallet_xcm_bridge_hub_router::Instance1; -impl pallet_xcm_bridge_hub_router::Config for Runtime { - type WeightInfo = weights::pallet_xcm_bridge_hub_router_to_wococo::WeightInfo; - - type UniversalLocation = xcm_config::UniversalLocation; - type BridgedNetworkId = xcm_config::bridging::to_wococo::WococoNetwork; - type Bridges = xcm_config::bridging::NetworkExportTable; - - #[cfg(not(feature = "runtime-benchmarks"))] - type BridgeHubOrigin = EnsureXcm>; - #[cfg(feature = "runtime-benchmarks")] - type BridgeHubOrigin = EitherOfDiverse< - // for running benchmarks - EnsureRoot, - // for running tests with `--feature runtime-benchmarks` - EnsureXcm>, - >; - - type ToBridgeHubSender = XcmpQueue; - type WithBridgeHubChannel = - cumulus_pallet_xcmp_queue::bridging::InAndOutXcmpChannelStatusProvider< - xcm_config::bridging::SiblingBridgeHubParaId, - Runtime, - >; - - type ByteFee = xcm_config::bridging::XcmBridgeHubRouterByteFee; - type FeeAsset = xcm_config::bridging::XcmBridgeHubRouterFeeAssetId; -} - -/// XCM router instance to BridgeHub with bridging capabilities for `Rococo` global -/// consensus with dynamic fees and back-pressure. 
-pub type ToRococoXcmRouterInstance = pallet_xcm_bridge_hub_router::Instance2; -impl pallet_xcm_bridge_hub_router::Config for Runtime { - type WeightInfo = weights::pallet_xcm_bridge_hub_router_to_rococo::WeightInfo; - - type UniversalLocation = xcm_config::UniversalLocation; - type BridgedNetworkId = xcm_config::bridging::to_rococo::RococoNetwork; - type Bridges = xcm_config::bridging::NetworkExportTable; - - #[cfg(not(feature = "runtime-benchmarks"))] - type BridgeHubOrigin = EnsureXcm>; - #[cfg(feature = "runtime-benchmarks")] - type BridgeHubOrigin = EitherOfDiverse< - // for running benchmarks - EnsureRoot, - // for running tests with `--feature runtime-benchmarks` - EnsureXcm>, - >; - - type ToBridgeHubSender = XcmpQueue; - type WithBridgeHubChannel = - cumulus_pallet_xcmp_queue::bridging::InAndOutXcmpChannelStatusProvider< - xcm_config::bridging::SiblingBridgeHubParaId, - Runtime, - >; - - type ByteFee = xcm_config::bridging::XcmBridgeHubRouterByteFee; - type FeeAsset = xcm_config::bridging::XcmBridgeHubRouterFeeAssetId; -} - /// XCM router instance to BridgeHub with bridging capabilities for `Westend` global /// consensus with dynamic fees and back-pressure. pub type ToWestendXcmRouterInstance = pallet_xcm_bridge_hub_router::Instance3; impl pallet_xcm_bridge_hub_router::Config for Runtime { - type WeightInfo = weights::pallet_xcm_bridge_hub_router_to_westend::WeightInfo; + type WeightInfo = weights::pallet_xcm_bridge_hub_router::WeightInfo; type UniversalLocation = xcm_config::UniversalLocation; type BridgedNetworkId = xcm_config::bridging::to_westend::WestendNetwork; @@ -996,8 +919,6 @@ construct_runtime!( Proxy: pallet_proxy::{Pallet, Call, Storage, Event} = 42, // Bridge utilities. - ToWococoXcmRouter: pallet_xcm_bridge_hub_router::::{Pallet, Storage, Call} = 43, - ToRococoXcmRouter: pallet_xcm_bridge_hub_router::::{Pallet, Storage, Call} = 44, ToWestendXcmRouter: pallet_xcm_bridge_hub_router::::{Pallet, Storage, Call} = 45, // The main stage. @@ -1073,9 +994,7 @@ mod benches { [pallet_timestamp, Timestamp] [pallet_collator_selection, CollatorSelection] [cumulus_pallet_xcmp_queue, XcmpQueue] - [pallet_xcm_bridge_hub_router, ToWococo] [pallet_xcm_bridge_hub_router, ToWestend] - [pallet_xcm_bridge_hub_router, ToRococo] // XCM [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] // NOTE: Make sure you point to the individual modules below. @@ -1332,9 +1251,7 @@ impl_runtime_apis! { type Foreign = pallet_assets::Pallet::; type Pool = pallet_assets::Pallet::; - type ToWococo = XcmBridgeHubRouterBench; type ToWestend = XcmBridgeHubRouterBench; - type ToRococo = XcmBridgeHubRouterBench; let mut list = Vec::::new(); list_benchmarks!(list, extra); @@ -1402,19 +1319,6 @@ impl_runtime_apis! { } } - impl XcmBridgeHubRouterConfig for Runtime { - fn make_congested() { - cumulus_pallet_xcmp_queue::bridging::suspend_channel_for_benchmarks::( - xcm_config::bridging::SiblingBridgeHubParaId::get().into() - ); - } - fn ensure_bridged_target_destination() -> MultiLocation { - ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( - xcm_config::bridging::SiblingBridgeHubParaId::get().into() - ); - xcm_config::bridging::to_wococo::AssetHubWococo::get() - } - } impl XcmBridgeHubRouterConfig for Runtime { fn make_congested() { cumulus_pallet_xcmp_queue::bridging::suspend_channel_for_benchmarks::( @@ -1428,20 +1332,6 @@ impl_runtime_apis! 
{ xcm_config::bridging::to_westend::AssetHubWestend::get() } } - impl XcmBridgeHubRouterConfig for Runtime { - fn make_congested() { - cumulus_pallet_xcmp_queue::bridging::suspend_channel_for_benchmarks::( - xcm_config::bridging::SiblingBridgeHubParaId::get().into() - ); - } - fn ensure_bridged_target_destination() -> MultiLocation { - xcm_config::Flavor::set(&RuntimeFlavor::Wococo); - ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( - xcm_config::bridging::SiblingBridgeHubParaId::get().into() - ); - xcm_config::bridging::to_rococo::AssetHubRococo::get() - } - } use xcm::latest::prelude::*; use xcm_config::{TokenLocation, MaxAssetsIntoHolding}; @@ -1498,11 +1388,11 @@ impl_runtime_apis! { MultiAsset { fun: Fungible(UNITS), id: Concrete(TokenLocation::get()) }, )); pub const CheckedAccount: Option<(AccountId, xcm_builder::MintLocation)> = None; - // AssetHubRococo trusts AssetHubWococo as reserve for WOCs + // AssetHubRococo trusts AssetHubWestend as reserve for WNDs pub TrustedReserve: Option<(MultiLocation, MultiAsset)> = Some( ( - xcm_config::bridging::to_wococo::AssetHubWococo::get(), - MultiAsset::from((xcm_config::bridging::to_wococo::WocLocation::get(), 1000000000000 as u128)) + xcm_config::bridging::to_westend::AssetHubWestend::get(), + MultiAsset::from((xcm_config::bridging::to_westend::WndLocation::get(), 1000000000000 as u128)) ) ); } @@ -1577,9 +1467,7 @@ impl_runtime_apis! { type Foreign = pallet_assets::Pallet::; type Pool = pallet_assets::Pallet::; - type ToWococo = XcmBridgeHubRouterBench; type ToWestend = XcmBridgeHubRouterBench; - type ToRococo = XcmBridgeHubRouterBench; let whitelist: Vec = vec![ // Block Number diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs index 0fc36d74ff0..252cf2630f4 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs @@ -36,9 +36,7 @@ pub mod pallet_timestamp; pub mod pallet_uniques; pub mod pallet_utility; pub mod pallet_xcm; -pub mod pallet_xcm_bridge_hub_router_to_rococo; -pub mod pallet_xcm_bridge_hub_router_to_westend; -pub mod pallet_xcm_bridge_hub_router_to_wococo; +pub mod pallet_xcm_bridge_hub_router; pub mod paritydb_weights; pub mod rocksdb_weights; pub mod xcm; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router_to_westend.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router.rs similarity index 75% rename from cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router_to_westend.rs rename to cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router.rs index 8c344b44f78..7e12453583d 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router_to_westend.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_bridge_hub_router` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -48,38 +48,34 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm_bridge_hub_router`. pub struct WeightInfo(PhantomData); impl pallet_xcm_bridge_hub_router::WeightInfo for WeightInfo { - /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Storage: `XcmpQueue::InboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ToWestendXcmRouter::Bridge` (r:1 w:1) /// Proof: `ToWestendXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) fn on_initialize_when_non_congested() -> Weight { // Proof Size summary in bytes: - // Measured: `193` - // Estimated: `3658` - // Minimum execution time: 8_528_000 picoseconds. - Weight::from_parts(8_886_000, 0) - .saturating_add(Weight::from_parts(0, 3658)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `154` + // Estimated: `1639` + // Minimum execution time: 7_924_000 picoseconds. + Weight::from_parts(8_199_000, 0) + .saturating_add(Weight::from_parts(0, 1639)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Storage: `XcmpQueue::InboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn on_initialize_when_congested() -> Weight { // Proof Size summary in bytes: - // Measured: `183` - // Estimated: `3648` - // Minimum execution time: 5_170_000 picoseconds. - Weight::from_parts(5_433_000, 0) - .saturating_add(Weight::from_parts(0, 3648)) - .saturating_add(T::DbWeight::get().reads(3)) + // Measured: `144` + // Estimated: `1629` + // Minimum execution time: 4_265_000 picoseconds. + Weight::from_parts(4_417_000, 0) + .saturating_add(Weight::from_parts(0, 1629)) + .saturating_add(T::DbWeight::get().reads(2)) } /// Storage: `ToWestendXcmRouter::Bridge` (r:1 w:1) /// Proof: `ToWestendXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) @@ -87,16 +83,16 @@ impl pallet_xcm_bridge_hub_router::WeightInfo for Weigh // Proof Size summary in bytes: // Measured: `150` // Estimated: `1502` - // Minimum execution time: 10_283_000 picoseconds. - Weight::from_parts(10_762_000, 0) + // Minimum execution time: 10_292_000 picoseconds. 
+ Weight::from_parts(10_797_000, 0) .saturating_add(Weight::from_parts(0, 1502)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x3302afcb67e838a3f960251b417b9a4f` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x3302afcb67e838a3f960251b417b9a4f` (r:1 w:0) /// Storage: UNKNOWN KEY `0x0973fe64c85043ba1c965cbc38eb63c7` (r:1 w:0) /// Proof: UNKNOWN KEY `0x0973fe64c85043ba1c965cbc38eb63c7` (r:1 w:0) /// Storage: `ToWestendXcmRouter::Bridge` (r:1 w:1) @@ -113,16 +109,16 @@ impl pallet_xcm_bridge_hub_router::WeightInfo for Weigh /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::InboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) fn send_message() -> Weight { // Proof Size summary in bytes: // Measured: `387` // Estimated: `3852` - // Minimum execution time: 52_040_000 picoseconds. - Weight::from_parts(53_500_000, 0) + // Minimum execution time: 61_995_000 picoseconds. + Weight::from_parts(65_137_000, 0) .saturating_add(Weight::from_parts(0, 3852)) .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router_to_rococo.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router_to_rococo.rs deleted file mode 100644 index ff00ace25b8..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router_to_rococo.rs +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `pallet_xcm_bridge_hub_router` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm_bridge_hub_router -// --chain=asset-hub-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_xcm_bridge_hub_router`. -pub struct WeightInfo(PhantomData); -impl pallet_xcm_bridge_hub_router::WeightInfo for WeightInfo { - /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Storage: `XcmpQueue::InboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ToRococoXcmRouter::Bridge` (r:1 w:1) - /// Proof: `ToRococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) - fn on_initialize_when_non_congested() -> Weight { - // Proof Size summary in bytes: - // Measured: `265` - // Estimated: `3730` - // Minimum execution time: 9_084_000 picoseconds. - Weight::from_parts(9_441_000, 0) - .saturating_add(Weight::from_parts(0, 3730)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Storage: `XcmpQueue::InboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn on_initialize_when_congested() -> Weight { - // Proof Size summary in bytes: - // Measured: `202` - // Estimated: `3667` - // Minimum execution time: 5_971_000 picoseconds. - Weight::from_parts(6_260_000, 0) - .saturating_add(Weight::from_parts(0, 3667)) - .saturating_add(T::DbWeight::get().reads(3)) - } - /// Storage: `ToRococoXcmRouter::Bridge` (r:1 w:1) - /// Proof: `ToRococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) - fn report_bridge_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `117` - // Estimated: `1502` - // Minimum execution time: 10_231_000 picoseconds. 
- Weight::from_parts(10_861_000, 0) - .saturating_add(Weight::from_parts(0, 1502)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x0973fe64c85043ba1c965cbc38eb63c7` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x0973fe64c85043ba1c965cbc38eb63c7` (r:1 w:0) - /// Storage: `ToRococoXcmRouter::Bridge` (r:1 w:1) - /// Proof: `ToRococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) - /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) - /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::InboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn send_message() -> Weight { - // Proof Size summary in bytes: - // Measured: `478` - // Estimated: `3943` - // Minimum execution time: 53_966_000 picoseconds. - Weight::from_parts(55_224_000, 0) - .saturating_add(Weight::from_parts(0, 3943)) - .saturating_add(T::DbWeight::get().reads(11)) - .saturating_add(T::DbWeight::get().writes(4)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router_to_wococo.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router_to_wococo.rs deleted file mode 100644 index ca371f1e6ce..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router_to_wococo.rs +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `pallet_xcm_bridge_hub_router` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm_bridge_hub_router -// --chain=asset-hub-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_xcm_bridge_hub_router`. -pub struct WeightInfo(PhantomData); -impl pallet_xcm_bridge_hub_router::WeightInfo for WeightInfo { - /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Storage: `XcmpQueue::InboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ToWococoXcmRouter::Bridge` (r:1 w:1) - /// Proof: `ToWococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) - fn on_initialize_when_non_congested() -> Weight { - // Proof Size summary in bytes: - // Measured: `231` - // Estimated: `3696` - // Minimum execution time: 9_115_000 picoseconds. - Weight::from_parts(9_522_000, 0) - .saturating_add(Weight::from_parts(0, 3696)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Storage: `XcmpQueue::InboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn on_initialize_when_congested() -> Weight { - // Proof Size summary in bytes: - // Measured: `183` - // Estimated: `3648` - // Minimum execution time: 5_207_000 picoseconds. - Weight::from_parts(5_534_000, 0) - .saturating_add(Weight::from_parts(0, 3648)) - .saturating_add(T::DbWeight::get().reads(3)) - } - /// Storage: `ToWococoXcmRouter::Bridge` (r:1 w:1) - /// Proof: `ToWococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) - fn report_bridge_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `83` - // Estimated: `1502` - // Minimum execution time: 10_437_000 picoseconds. 
- Weight::from_parts(10_956_000, 0) - .saturating_add(Weight::from_parts(0, 1502)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x0973fe64c85043ba1c965cbc38eb63c7` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x0973fe64c85043ba1c965cbc38eb63c7` (r:1 w:0) - /// Storage: `ToWococoXcmRouter::Bridge` (r:1 w:1) - /// Proof: `ToWococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) - /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) - /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::InboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn send_message() -> Weight { - // Proof Size summary in bytes: - // Measured: `425` - // Estimated: `3890` - // Minimum execution time: 52_176_000 picoseconds. - Weight::from_parts(54_067_000, 0) - .saturating_add(Weight::from_parts(0, 3890)) - .saturating_add(T::DbWeight::get().reads(11)) - .saturating_add(T::DbWeight::get().writes(4)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index fe5123a427c..7fab3584250 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::fungible` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-rococo-dev"), DB CACHE: 1024 // Executed Command: @@ -54,8 +54,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 20_940_000 picoseconds. - Weight::from_parts(21_453_000, 3593) + // Minimum execution time: 21_643_000 picoseconds. + Weight::from_parts(22_410_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -65,15 +65,13 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `6196` - // Minimum execution time: 44_310_000 picoseconds. - Weight::from_parts(44_948_000, 6196) + // Minimum execution time: 43_758_000 picoseconds. + Weight::from_parts(44_654_000, 6196) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } // Storage: `System::Account` (r:3 w:3) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -92,25 +90,21 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `8799` - // Minimum execution time: 87_226_000 picoseconds. - Weight::from_parts(89_399_000, 8799) - .saturating_add(T::DbWeight::get().reads(11)) + // Minimum execution time: 87_978_000 picoseconds. + Weight::from_parts(88_517_000, 8799) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(5)) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) pub fn reserve_asset_deposited() -> Weight { // Proof Size summary in bytes: - // Measured: `39` - // Estimated: `3504` - // Minimum execution time: 7_320_000 picoseconds. - Weight::from_parts(7_453_000, 3504) - .saturating_add(T::DbWeight::get().reads(2)) + // Measured: `0` + // Estimated: `1489` + // Minimum execution time: 6_883_000 picoseconds. + Weight::from_parts(6_979_000, 1489) + .saturating_add(T::DbWeight::get().reads(1)) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -131,17 +125,17 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 183_539_000 picoseconds. - Weight::from_parts(190_968_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 198_882_000 picoseconds. 
+ Weight::from_parts(199_930_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } pub fn receive_teleported_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_068_000 picoseconds. - Weight::from_parts(3_228_000, 0) + // Minimum execution time: 3_343_000 picoseconds. + Weight::from_parts(3_487_000, 0) } // Storage: `System::Account` (r:1 w:1) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) @@ -149,15 +143,13 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 18_788_000 picoseconds. - Weight::from_parts(19_240_000, 3593) + // Minimum execution time: 19_399_000 picoseconds. + Weight::from_parts(19_659_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } // Storage: `System::Account` (r:2 w:2) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -176,13 +168,11 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `6196` - // Minimum execution time: 58_577_000 picoseconds. - Weight::from_parts(59_729_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 59_017_000 picoseconds. + Weight::from_parts(60_543_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -203,9 +193,9 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 45_804_000 picoseconds. - Weight::from_parts(46_702_000, 3610) - .saturating_add(T::DbWeight::get().reads(9)) + // Minimum execution time: 45_409_000 picoseconds. + Weight::from_parts(47_041_000, 3610) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index e2fe122a12d..4454494badc 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-rococo-dev"), DB CACHE: 1024 // Executed Command: @@ -48,8 +48,6 @@ use sp_std::marker::PhantomData; /// Weights for `pallet_xcm_benchmarks::generic`. pub struct WeightInfo(PhantomData); impl WeightInfo { - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -70,17 +68,17 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 415_688_000 picoseconds. - Weight::from_parts(433_876_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 440_298_000 picoseconds. + Weight::from_parts(446_508_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } pub fn buy_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_209_000 picoseconds. - Weight::from_parts(3_465_000, 0) + // Minimum execution time: 3_313_000 picoseconds. + Weight::from_parts(3_422_000, 0) } // Storage: `PolkadotXcm::Queries` (r:1 w:0) // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -88,61 +86,59 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3568` - // Minimum execution time: 7_940_000 picoseconds. - Weight::from_parts(8_208_000, 3568) + // Minimum execution time: 9_691_000 picoseconds. + Weight::from_parts(9_948_000, 3568) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_336_000 picoseconds. - Weight::from_parts(9_733_000, 0) + // Minimum execution time: 10_384_000 picoseconds. + Weight::from_parts(11_085_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_368_000 picoseconds. - Weight::from_parts(3_700_000, 0) + // Minimum execution time: 3_438_000 picoseconds. + Weight::from_parts(3_577_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_868_000 picoseconds. - Weight::from_parts(2_034_000, 0) + // Minimum execution time: 2_126_000 picoseconds. + Weight::from_parts(2_243_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_870_000 picoseconds. - Weight::from_parts(1_972_000, 0) + // Minimum execution time: 2_126_000 picoseconds. + Weight::from_parts(2_207_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_890_000 picoseconds. - Weight::from_parts(1_962_000, 0) + // Minimum execution time: 2_105_000 picoseconds. 
+ Weight::from_parts(2_193_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_660_000 picoseconds. - Weight::from_parts(2_744_000, 0) + // Minimum execution time: 2_999_000 picoseconds. + Weight::from_parts(3_056_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_845_000 picoseconds. - Weight::from_parts(1_945_000, 0) + // Minimum execution time: 2_091_000 picoseconds. + Weight::from_parts(2_176_000, 0) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -163,9 +159,9 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 54_283_000 picoseconds. - Weight::from_parts(54_969_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 55_728_000 picoseconds. + Weight::from_parts(56_704_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } // Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) @@ -174,8 +170,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `160` // Estimated: `3625` - // Minimum execution time: 11_850_000 picoseconds. - Weight::from_parts(12_328_000, 3625) + // Minimum execution time: 12_839_000 picoseconds. + Weight::from_parts(13_457_000, 3625) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -183,8 +179,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_891_000 picoseconds. - Weight::from_parts(1_950_000, 0) + // Minimum execution time: 2_116_000 picoseconds. + Weight::from_parts(2_219_000, 0) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -204,8 +200,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 23_644_000 picoseconds. - Weight::from_parts(24_296_000, 3610) + // Minimum execution time: 24_891_000 picoseconds. + Weight::from_parts(25_583_000, 3610) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -215,47 +211,45 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_719_000 picoseconds. - Weight::from_parts(3_896_000, 0) + // Minimum execution time: 3_968_000 picoseconds. + Weight::from_parts(4_122_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 125_710_000 picoseconds. - Weight::from_parts(132_434_000, 0) + // Minimum execution time: 136_220_000 picoseconds. + Weight::from_parts(137_194_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 11_650_000 picoseconds. - Weight::from_parts(12_277_000, 0) + // Minimum execution time: 12_343_000 picoseconds. 
+ Weight::from_parts(12_635_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_978_000 picoseconds. - Weight::from_parts(2_070_000, 0) + // Minimum execution time: 2_237_000 picoseconds. + Weight::from_parts(2_315_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_899_000 picoseconds. - Weight::from_parts(2_002_000, 0) + // Minimum execution time: 2_094_000 picoseconds. + Weight::from_parts(2_231_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_133_000 picoseconds. - Weight::from_parts(2_194_000, 0) + // Minimum execution time: 2_379_000 picoseconds. + Weight::from_parts(2_455_000, 0) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -276,20 +270,18 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 58_644_000 picoseconds. - Weight::from_parts(60_614_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 60_734_000 picoseconds. + Weight::from_parts(61_964_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } pub fn expect_pallet() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_185_000 picoseconds. - Weight::from_parts(5_366_000, 0) + // Minimum execution time: 5_500_000 picoseconds. + Weight::from_parts(5_720_000, 0) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -310,56 +302,54 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 54_443_000 picoseconds. - Weight::from_parts(55_873_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 55_767_000 picoseconds. + Weight::from_parts(56_790_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } pub fn clear_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_909_000 picoseconds. - Weight::from_parts(2_011_000, 0) + // Minimum execution time: 2_201_000 picoseconds. + Weight::from_parts(2_291_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_814_000 picoseconds. - Weight::from_parts(1_956_000, 0) + // Minimum execution time: 2_164_000 picoseconds. + Weight::from_parts(2_241_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_875_000 picoseconds. 
- Weight::from_parts(2_003_000, 0) + // Minimum execution time: 2_127_000 picoseconds. + Weight::from_parts(2_236_000, 0) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) pub fn universal_origin() -> Weight { // Proof Size summary in bytes: - // Measured: `39` - // Estimated: `3504` - // Minimum execution time: 7_376_000 picoseconds. - Weight::from_parts(7_620_000, 3504) - .saturating_add(T::DbWeight::get().reads(2)) + // Measured: `0` + // Estimated: `1489` + // Minimum execution time: 4_275_000 picoseconds. + Weight::from_parts(4_381_000, 1489) + .saturating_add(T::DbWeight::get().reads(1)) } pub fn set_fees_mode() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_863_000 picoseconds. - Weight::from_parts(1_964_000, 0) + // Minimum execution time: 2_132_000 picoseconds. + Weight::from_parts(2_216_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_956_000 picoseconds. - Weight::from_parts(2_057_000, 0) + // Minimum execution time: 2_265_000 picoseconds. + Weight::from_parts(2_332_000, 0) } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs index b0bf9e82729..b85cb76642f 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs @@ -16,9 +16,8 @@ use super::{ AccountId, AllPalletsWithSystem, Assets, Authorship, Balance, Balances, BaseDeliveryFee, FeeAssetId, ForeignAssets, ForeignAssetsInstance, ParachainInfo, ParachainSystem, PolkadotXcm, - PoolAssets, Runtime, RuntimeCall, RuntimeEvent, RuntimeFlavor, RuntimeOrigin, - ToRococoXcmRouter, ToWestendXcmRouter, ToWococoXcmRouter, TransactionByteFee, - TrustBackedAssetsInstance, WeightToFee, XcmpQueue, + PoolAssets, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, ToWestendXcmRouter, + TransactionByteFee, TrustBackedAssetsInstance, WeightToFee, XcmpQueue, }; use assets_common::{ local_and_foreign_assets::MatchesLocalAndForeignAssetsMultiLocation, @@ -26,7 +25,7 @@ use assets_common::{ }; use frame_support::{ match_types, parameter_types, - traits::{ConstU32, Contains, Equals, Everything, Get, Nothing, PalletInfoAccess}, + traits::{ConstU32, Contains, Equals, Everything, Nothing, PalletInfoAccess}, }; use frame_system::EnsureRoot; use pallet_xcm::XcmPassthrough; @@ -61,8 +60,8 @@ use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; use cumulus_primitives_core::ParaId; parameter_types! { - pub storage Flavor: RuntimeFlavor = RuntimeFlavor::default(); pub const TokenLocation: MultiLocation = MultiLocation::parent(); + pub const RelayNetwork: NetworkId = NetworkId::Rococo; pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); pub UniversalLocation: InteriorMultiLocation = X2(GlobalConsensus(RelayNetwork::get()), Parachain(ParachainInfo::parachain_id().into())); @@ -79,22 +78,6 @@ parameter_types! 
{ pub RelayTreasuryLocation: MultiLocation = (Parent, PalletInstance(rococo_runtime_constants::TREASURY_PALLET_ID)).into(); } -/// Adapter for resolving `NetworkId` based on `pub storage Flavor: RuntimeFlavor`. -pub struct RelayNetwork; -impl Get> for RelayNetwork { - fn get() -> Option { - Some(Self::get()) - } -} -impl Get for RelayNetwork { - fn get() -> NetworkId { - match Flavor::get() { - RuntimeFlavor::Rococo => NetworkId::Rococo, - RuntimeFlavor::Wococo => NetworkId::Wococo, - } - } -} - /// Type for specifying how a `MultiLocation` can be converted into an `AccountId`. This is used /// when determining ownership of accounts for asset transacting and when attempting to use XCM /// `Transact` in order to determine the dispatch Origin. @@ -285,8 +268,7 @@ impl Contains for SafeCallFilter { if items.iter().all(|(k, _)| k.eq(&bridging::XcmBridgeHubRouterByteFee::key())) || items .iter() - .all(|(k, _)| k.eq(&bridging::XcmBridgeHubRouterBaseFee::key())) || - items.iter().all(|(k, _)| k.eq(&Flavor::key())) => + .all(|(k, _)| k.eq(&bridging::XcmBridgeHubRouterBaseFee::key())) => return true, _ => (), }; @@ -475,12 +457,8 @@ impl Contains for SafeCallFilter { pallet_uniques::Call::set_collection_max_supply { .. } | pallet_uniques::Call::set_price { .. } | pallet_uniques::Call::buy_item { .. } - ) | RuntimeCall::ToWococoXcmRouter( - pallet_xcm_bridge_hub_router::Call::report_bridge_status { .. } ) | RuntimeCall::ToWestendXcmRouter( pallet_xcm_bridge_hub_router::Call::report_bridge_status { .. } - ) | RuntimeCall::ToRococoXcmRouter( - pallet_xcm_bridge_hub_router::Call::report_bridge_status { .. } ) ) } @@ -572,11 +550,7 @@ impl xcm_executor::Config for XcmConfig { // as reserve locations (we trust the Bridge Hub to relay the message that a reserve is being // held). Asset Hub may _act_ as a reserve location for ROC and assets created // under `pallet-assets`. Users must use teleport where allowed (e.g. ROC with the Relay Chain). - type IsReserve = ( - bridging::to_wococo::IsTrustedBridgedReserveLocationForConcreteAsset, - bridging::to_westend::IsTrustedBridgedReserveLocationForConcreteAsset, - bridging::to_rococo::IsTrustedBridgedReserveLocationForConcreteAsset, - ); + type IsReserve = (bridging::to_westend::IsTrustedBridgedReserveLocationForConcreteAsset,); type IsTeleporter = TrustedTeleporters; type UniversalLocation = UniversalLocation; type Barrier = Barrier; @@ -627,11 +601,7 @@ impl xcm_executor::Config for XcmConfig { XcmFeeToAccount, >; type MessageExporter = (); - type UniversalAliases = ( - bridging::to_wococo::UniversalAliases, - bridging::to_rococo::UniversalAliases, - bridging::to_westend::UniversalAliases, - ); + type UniversalAliases = (bridging::to_westend::UniversalAliases,); type CallDispatcher = WithOriginFilter; type SafeCallFilter = SafeCallFilter; type Aliasers = Nothing; @@ -656,15 +626,9 @@ type LocalXcmRouter = ( /// queues. 
pub type XcmRouter = WithUniqueTopic<( LocalXcmRouter, - // Router which wraps and sends xcm to BridgeHub to be delivered to the Wococo - // GlobalConsensus - ToWococoXcmRouter, // Router which wraps and sends xcm to BridgeHub to be delivered to the Westend // GlobalConsensus ToWestendXcmRouter, - // Router which wraps and sends xcm to BridgeHub to be delivered to the Rococo - // GlobalConsensus - ToRococoXcmRouter, )>; impl pallet_xcm::Config for Runtime { @@ -731,7 +695,7 @@ impl pallet_asset_conversion::BenchmarkHelper> for BenchmarkMultiLocationConverter where - SelfParaId: Get, + SelfParaId: frame_support::traits::Get, { fn asset_id(asset_id: u32) -> MultiLocation { MultiLocation { @@ -754,7 +718,7 @@ pub mod bridging { use assets_common::matching; use sp_std::collections::btree_set::BTreeSet; - // common/shared parameters for Wococo/Rococo + // common/shared parameters parameter_types! { /// Base price of every byte of the Rococo -> Westend message. Can be adjusted via /// governance `set_storage` call. @@ -775,10 +739,7 @@ pub mod bridging { /// governance `set_storage` call. pub storage XcmBridgeHubRouterByteFee: Balance = TransactionByteFee::get(); - pub SiblingBridgeHubParaId: u32 = match Flavor::get() { - RuntimeFlavor::Rococo => bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - RuntimeFlavor::Wococo => bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, - }; + pub SiblingBridgeHubParaId: u32 = bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID; pub SiblingBridgeHub: MultiLocation = MultiLocation::new(1, X1(Parachain(SiblingBridgeHubParaId::get()))); /// Router expects payment with this `AssetId`. /// (`AssetId` has to be aligned with `BridgeTable`) @@ -786,90 +747,12 @@ pub mod bridging { pub BridgeTable: sp_std::vec::Vec = sp_std::vec::Vec::new().into_iter() - .chain(to_wococo::BridgeTable::get()) .chain(to_westend::BridgeTable::get()) - .chain(to_rococo::BridgeTable::get()) .collect(); } pub type NetworkExportTable = xcm_builder::NetworkExportTable; - pub mod to_wococo { - use super::*; - - parameter_types! { - pub SiblingBridgeHubWithBridgeHubWococoInstance: MultiLocation = MultiLocation::new( - 1, - X2( - Parachain(SiblingBridgeHubParaId::get()), - PalletInstance(bp_bridge_hub_rococo::WITH_BRIDGE_ROCOCO_TO_WOCOCO_MESSAGES_PALLET_INDEX) - ) - ); - - pub const WococoNetwork: NetworkId = NetworkId::Wococo; - pub AssetHubWococo: MultiLocation = MultiLocation::new(2, X2(GlobalConsensus(WococoNetwork::get()), Parachain(bp_asset_hub_wococo::ASSET_HUB_WOCOCO_PARACHAIN_ID))); - pub WocLocation: MultiLocation = MultiLocation::new(2, X1(GlobalConsensus(WococoNetwork::get()))); - - pub WocFromAssetHubWococo: (MultiAssetFilter, MultiLocation) = ( - Wild(AllOf { fun: WildFungible, id: Concrete(WocLocation::get()) }), - AssetHubWococo::get() - ); - - /// Set up exporters configuration. - /// `Option` represents static "base fee" which is used for total delivery fee calculation. 
- pub BridgeTable: sp_std::vec::Vec = sp_std::vec![ - NetworkExportTableItem::new( - WococoNetwork::get(), - Some(sp_std::vec![ - AssetHubWococo::get().interior.split_global().expect("invalid configuration for AssetHubWococo").1, - ]), - SiblingBridgeHub::get(), - // base delivery fee to local `BridgeHub` - Some(( - XcmBridgeHubRouterFeeAssetId::get(), - XcmBridgeHubRouterBaseFee::get(), - ).into()) - ) - ]; - - /// Universal aliases - pub UniversalAliases: BTreeSet<(MultiLocation, Junction)> = BTreeSet::from_iter( - sp_std::vec![ - (SiblingBridgeHubWithBridgeHubWococoInstance::get(), GlobalConsensus(WococoNetwork::get())) - ] - ); - } - - impl Contains<(MultiLocation, Junction)> for UniversalAliases { - fn contains(alias: &(MultiLocation, Junction)) -> bool { - UniversalAliases::get().contains(alias) - } - } - - /// Trusted reserve locations filter for `xcm_executor::Config::IsReserve`. - /// Locations from which the runtime accepts reserved assets. - pub type IsTrustedBridgedReserveLocationForConcreteAsset = - matching::IsTrustedBridgedReserveLocationForConcreteAsset< - UniversalLocation, - ( - // allow receive WOC from AssetHubWococo - xcm_builder::Case, - // and nothing else - ), - >; - - impl Contains for ToWococoXcmRouter { - fn contains(call: &RuntimeCall) -> bool { - matches!( - call, - RuntimeCall::ToWococoXcmRouter( - pallet_xcm_bridge_hub_router::Call::report_bridge_status { .. } - ) - ) - } - } - } - pub mod to_westend { use super::*; @@ -946,82 +829,6 @@ pub mod bridging { } } - pub mod to_rococo { - use super::*; - - parameter_types! { - pub SiblingBridgeHubWithBridgeHubRococoInstance: MultiLocation = MultiLocation::new( - 1, - X2( - Parachain(SiblingBridgeHubParaId::get()), - PalletInstance(bp_bridge_hub_wococo::WITH_BRIDGE_WOCOCO_TO_ROCOCO_MESSAGES_PALLET_INDEX) - ) - ); - - pub const RococoNetwork: NetworkId = NetworkId::Rococo; - pub AssetHubRococo: MultiLocation = MultiLocation::new(2, X2(GlobalConsensus(RococoNetwork::get()), Parachain(bp_asset_hub_rococo::ASSET_HUB_ROCOCO_PARACHAIN_ID))); - pub RocLocation: MultiLocation = MultiLocation::new(2, X1(GlobalConsensus(RococoNetwork::get()))); - - pub RocFromAssetHubRococo: (MultiAssetFilter, MultiLocation) = ( - Wild(AllOf { fun: WildFungible, id: Concrete(RocLocation::get()) }), - AssetHubRococo::get() - ); - - /// Set up exporters configuration. - /// `Option` represents static "base fee" which is used for total delivery fee calculation. - pub BridgeTable: sp_std::vec::Vec = sp_std::vec![ - NetworkExportTableItem::new( - RococoNetwork::get(), - Some(sp_std::vec![ - AssetHubRococo::get().interior.split_global().expect("invalid configuration for AssetHubRococo").1, - ]), - SiblingBridgeHub::get(), - // base delivery fee to local `BridgeHub` - Some(( - XcmBridgeHubRouterFeeAssetId::get(), - XcmBridgeHubRouterBaseFee::get(), - ).into()) - ) - ]; - - /// Universal aliases - pub UniversalAliases: BTreeSet<(MultiLocation, Junction)> = BTreeSet::from_iter( - sp_std::vec![ - (SiblingBridgeHubWithBridgeHubRococoInstance::get(), GlobalConsensus(RococoNetwork::get())) - ] - ); - } - - impl Contains<(MultiLocation, Junction)> for UniversalAliases { - fn contains(alias: &(MultiLocation, Junction)) -> bool { - UniversalAliases::get().contains(alias) - } - } - - /// Reserve locations filter for `xcm_executor::Config::IsReserve`. - /// Locations from which the runtime accepts reserved assets. 
- pub type IsTrustedBridgedReserveLocationForConcreteAsset = - matching::IsTrustedBridgedReserveLocationForConcreteAsset< - UniversalLocation, - ( - // allow receive ROC from AssetHubRococo - xcm_builder::Case, - // and nothing else - ), - >; - - impl Contains for ToRococoXcmRouter { - fn contains(call: &RuntimeCall) -> bool { - matches!( - call, - RuntimeCall::ToRococoXcmRouter( - pallet_xcm_bridge_hub_router::Call::report_bridge_status { .. } - ) - ) - } - } - } - /// Benchmarks helper for bridging configuration. #[cfg(feature = "runtime-benchmarks")] pub struct BridgingBenchmarksHelper; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs index b4f4e828dde..7bb71a77de7 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs @@ -28,8 +28,8 @@ pub use asset_hub_rococo_runtime::{ }, AllPalletsWithoutSystem, AssetDeposit, Assets, Balances, ExistentialDeposit, ForeignAssets, ForeignAssetsInstance, MetadataDepositBase, MetadataDepositPerByte, ParachainSystem, Runtime, - RuntimeCall, RuntimeEvent, RuntimeFlavor, SessionKeys, System, ToRococoXcmRouterInstance, - ToWestendXcmRouterInstance, ToWococoXcmRouterInstance, TrustBackedAssetsInstance, XcmpQueue, + RuntimeCall, RuntimeEvent, SessionKeys, System, ToWestendXcmRouterInstance, + TrustBackedAssetsInstance, XcmpQueue, }; use asset_test_utils::{ test_cases_over_bridge::TestBridgingConfig, CollatorSessionKey, CollatorSessionKeys, ExtBuilder, @@ -674,15 +674,6 @@ fn limited_reserve_transfer_assets_for_native_asset_over_bridge_works( mod asset_hub_rococo_tests { use super::*; - fn bridging_to_asset_hub_wococo() -> TestBridgingConfig { - asset_test_utils::test_cases_over_bridge::TestBridgingConfig { - bridged_network: bridging::to_wococo::WococoNetwork::get(), - local_bridge_hub_para_id: bridging::SiblingBridgeHubParaId::get(), - local_bridge_hub_location: bridging::SiblingBridgeHub::get(), - bridged_target_location: bridging::to_wococo::AssetHubWococo::get(), - } - } - fn bridging_to_asset_hub_westend() -> TestBridgingConfig { asset_test_utils::test_cases_over_bridge::TestBridgingConfig { bridged_network: bridging::to_westend::WestendNetwork::get(), @@ -692,13 +683,6 @@ mod asset_hub_rococo_tests { } } - #[test] - fn limited_reserve_transfer_assets_for_native_asset_to_asset_hub_wococo_works() { - limited_reserve_transfer_assets_for_native_asset_over_bridge_works( - bridging_to_asset_hub_wococo, - ) - } - #[test] fn limited_reserve_transfer_assets_for_native_asset_to_asset_hub_westend_works() { limited_reserve_transfer_assets_for_native_asset_over_bridge_works( @@ -706,31 +690,6 @@ mod asset_hub_rococo_tests { ) } - #[test] - fn receive_reserve_asset_deposited_woc_from_asset_hub_wococo_works() { - const BLOCK_AUTHOR_ACCOUNT: [u8; 32] = [13; 32]; - asset_test_utils::test_cases_over_bridge::receive_reserve_asset_deposited_from_different_consensus_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - LocationToAccountId, - ForeignAssetsInstance, - >( - collator_session_keys().add(collator_session_key(BLOCK_AUTHOR_ACCOUNT)), - ExistentialDeposit::get(), - AccountId::from([73; 32]), - AccountId::from(BLOCK_AUTHOR_ACCOUNT), - // receiving WOCs - (MultiLocation { parents: 2, interior: X1(GlobalConsensus(Wococo)) }, 1000000000000, 1_000_000_000), - bridging_to_asset_hub_wococo, - ( - 
X1(PalletInstance(bp_bridge_hub_rococo::WITH_BRIDGE_ROCOCO_TO_WOCOCO_MESSAGES_PALLET_INDEX)), - GlobalConsensus(Wococo), - X1(Parachain(1000)) - ) - ) - } - #[test] fn receive_reserve_asset_deposited_wnd_from_asset_hub_westend_works() { const BLOCK_AUTHOR_ACCOUNT: [u8; 32] = [13; 32]; @@ -756,58 +715,6 @@ mod asset_hub_rococo_tests { ) } - #[test] - fn report_bridge_status_from_xcm_bridge_router_for_wococo_works() { - asset_test_utils::test_cases_over_bridge::report_bridge_status_from_xcm_bridge_router_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - LocationToAccountId, - ToWococoXcmRouterInstance, - >( - collator_session_keys(), - bridging_to_asset_hub_wococo, - || { - sp_std::vec![ - UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - Transact { - origin_kind: OriginKind::Xcm, - require_weight_at_most: - bp_asset_hub_rococo::XcmBridgeHubRouterTransactCallMaxWeight::get(), - call: bp_asset_hub_rococo::Call::ToWococoXcmRouter( - bp_asset_hub_rococo::XcmBridgeHubRouterCall::report_bridge_status { - bridge_id: Default::default(), - is_congested: true, - } - ) - .encode() - .into(), - } - ] - .into() - }, - || { - sp_std::vec![ - UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - Transact { - origin_kind: OriginKind::Xcm, - require_weight_at_most: - bp_asset_hub_rococo::XcmBridgeHubRouterTransactCallMaxWeight::get(), - call: bp_asset_hub_rococo::Call::ToWococoXcmRouter( - bp_asset_hub_rococo::XcmBridgeHubRouterCall::report_bridge_status { - bridge_id: Default::default(), - is_congested: false, - } - ) - .encode() - .into(), - } - ] - .into() - }, - ) - } - #[test] fn report_bridge_status_from_xcm_bridge_router_for_westend_works() { asset_test_utils::test_cases_over_bridge::report_bridge_status_from_xcm_bridge_router_works::< @@ -863,22 +770,6 @@ mod asset_hub_rococo_tests { #[test] fn test_report_bridge_status_call_compatibility() { // if this test fails, make sure `bp_asset_hub_rococo` has valid encoding - assert_eq!( - RuntimeCall::ToWococoXcmRouter( - pallet_xcm_bridge_hub_router::Call::report_bridge_status { - bridge_id: Default::default(), - is_congested: true, - } - ) - .encode(), - bp_asset_hub_rococo::Call::ToWococoXcmRouter( - bp_asset_hub_rococo::XcmBridgeHubRouterCall::report_bridge_status { - bridge_id: Default::default(), - is_congested: true, - } - ) - .encode() - ); assert_eq!( RuntimeCall::ToWestendXcmRouter( pallet_xcm_bridge_hub_router::Call::report_bridge_status { @@ -897,19 +788,6 @@ mod asset_hub_rococo_tests { ); } - #[test] - fn check_sane_weight_report_bridge_status_for_wococo() { - use pallet_xcm_bridge_hub_router::WeightInfo; - let actual = >::WeightInfo::report_bridge_status(); - let max_weight = bp_asset_hub_rococo::XcmBridgeHubRouterTransactCallMaxWeight::get(); - assert!( - actual.all_lte(max_weight), - "max_weight: {:?} should be adjusted to actual {:?}", - max_weight, - actual - ); - } - #[test] fn check_sane_weight_report_bridge_status_for_westend() { use pallet_xcm_bridge_hub_router::WeightInfo; @@ -955,167 +833,6 @@ mod asset_hub_rococo_tests { } } -mod asset_hub_wococo_tests { - use super::*; - - fn bridging_to_asset_hub_rococo() -> TestBridgingConfig { - TestBridgingConfig { - bridged_network: bridging::to_rococo::RococoNetwork::get(), - local_bridge_hub_para_id: bridging::SiblingBridgeHubParaId::get(), - local_bridge_hub_location: bridging::SiblingBridgeHub::get(), - bridged_target_location: bridging::to_rococo::AssetHubRococo::get(), - } - } - - pub(crate) fn set_wococo_flavor() { - let flavor_key = 
xcm_config::Flavor::key().to_vec(); - let flavor = RuntimeFlavor::Wococo; - - // encode `set_storage` call - let set_storage_call = RuntimeCall::System(frame_system::Call::::set_storage { - items: vec![(flavor_key, flavor.encode())], - }) - .encode(); - - // estimate - storing just 1 value - use frame_system::WeightInfo; - let require_weight_at_most = - ::SystemWeightInfo::set_storage(1); - - // execute XCM with Transact to `set_storage` as governance does - assert_ok!(RuntimeHelper::execute_as_governance(set_storage_call, require_weight_at_most) - .ensure_complete()); - - // check if stored - assert_eq!(flavor, xcm_config::Flavor::get()); - } - - fn with_wococo_flavor_bridging_to_asset_hub_rococo() -> TestBridgingConfig { - set_wococo_flavor(); - bridging_to_asset_hub_rococo() - } - - #[test] - fn limited_reserve_transfer_assets_for_native_asset_to_asset_hub_rococo_works() { - limited_reserve_transfer_assets_for_native_asset_over_bridge_works( - with_wococo_flavor_bridging_to_asset_hub_rococo, - ) - } - - #[test] - fn receive_reserve_asset_deposited_roc_from_asset_hub_rococo_works() { - const BLOCK_AUTHOR_ACCOUNT: [u8; 32] = [13; 32]; - asset_test_utils::test_cases_over_bridge::receive_reserve_asset_deposited_from_different_consensus_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - LocationToAccountId, - ForeignAssetsInstance, - >( - collator_session_keys().add(collator_session_key(BLOCK_AUTHOR_ACCOUNT)), - ExistentialDeposit::get(), - AccountId::from([73; 32]), - AccountId::from(BLOCK_AUTHOR_ACCOUNT), - // receiving ROCs - (MultiLocation { parents: 2, interior: X1(GlobalConsensus(Rococo)) }, 1000000000000, 1_000_000_000), - with_wococo_flavor_bridging_to_asset_hub_rococo, - ( - X1(PalletInstance(bp_bridge_hub_wococo::WITH_BRIDGE_WOCOCO_TO_ROCOCO_MESSAGES_PALLET_INDEX)), - GlobalConsensus(Rococo), - X1(Parachain(1000)) - ) - ) - } - - #[test] - fn report_bridge_status_from_xcm_bridge_router_works() { - asset_test_utils::test_cases_over_bridge::report_bridge_status_from_xcm_bridge_router_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - LocationToAccountId, - ToRococoXcmRouterInstance, - >( - collator_session_keys(), - with_wococo_flavor_bridging_to_asset_hub_rococo, - || { - sp_std::vec![ - UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - Transact { - origin_kind: OriginKind::Xcm, - require_weight_at_most: - bp_asset_hub_wococo::XcmBridgeHubRouterTransactCallMaxWeight::get(), - call: bp_asset_hub_wococo::Call::ToRococoXcmRouter( - bp_asset_hub_wococo::XcmBridgeHubRouterCall::report_bridge_status { - bridge_id: Default::default(), - is_congested: true, - } - ) - .encode() - .into(), - } - ] - .into() - }, - || { - sp_std::vec![ - UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - Transact { - origin_kind: OriginKind::Xcm, - require_weight_at_most: - bp_asset_hub_wococo::XcmBridgeHubRouterTransactCallMaxWeight::get(), - call: bp_asset_hub_wococo::Call::ToRococoXcmRouter( - bp_asset_hub_wococo::XcmBridgeHubRouterCall::report_bridge_status { - bridge_id: Default::default(), - is_congested: false, - } - ) - .encode() - .into(), - } - ] - .into() - }, - ) - } - - #[test] - fn test_report_bridge_status_call_compatibility() { - // if this test fails, make sure `bp_asset_hub_rococo` has valid encoding - assert_eq!( - RuntimeCall::ToRococoXcmRouter( - pallet_xcm_bridge_hub_router::Call::report_bridge_status { - bridge_id: Default::default(), - is_congested: true, - } - ) - .encode(), - bp_asset_hub_wococo::Call::ToRococoXcmRouter( - 
bp_asset_hub_wococo::XcmBridgeHubRouterCall::report_bridge_status { - bridge_id: Default::default(), - is_congested: true, - } - ) - .encode() - ) - } - - #[test] - fn check_sane_weight_report_bridge_status() { - use pallet_xcm_bridge_hub_router::WeightInfo; - let actual = >::WeightInfo::report_bridge_status(); - let max_weight = bp_asset_hub_wococo::XcmBridgeHubRouterTransactCallMaxWeight::get(); - assert!( - actual.all_lte(max_weight), - "max_weight: {:?} should be adjusted to actual {:?}", - max_weight, - actual - ); - } -} - #[test] fn change_xcm_bridge_hub_router_byte_fee_by_governance_works() { asset_test_utils::test_cases::change_storage_constant_by_governance_works::< diff --git a/cumulus/parachains/runtimes/bridge-hubs/README.md b/cumulus/parachains/runtimes/bridge-hubs/README.md index b2a14a0405d..cf617db730d 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/README.md +++ b/cumulus/parachains/runtimes/bridge-hubs/README.md @@ -1,14 +1,5 @@ - [Bridge-hub Parachains](#bridge-hub-parachains) - [Requirements for local run/testing](#requirements-for-local-runtesting) - - [How to test local Rococo <-> Wococo bridge](#how-to-test-local-rococo---wococo-bridge) - - [Run Rococo/Wococo chains with zombienet](#run-rococowococo-chains-with-zombienet) - - [Init bridge and run relayer between BridgeHubRococo and - BridgeHubWococo](#init-bridge-and-run-relayer-between-bridgehubrococo-and-bridgehubwococo) - - [Initialize configuration for transfer asset over bridge - (ROCs/WOCs)](#initialize-configuration-for-transfer-asset-over-bridge-rocswocs) - - [Send messages - transfer asset over bridge (ROCs/WOCs)](#send-messages---transfer-asset-over-bridge-rocswocs) - - [Claim relayer's rewards on BridgeHubRococo and - BridgeHubWococo](#claim-relayers-rewards-on-bridgehubrococo-and-bridgehubwococo) - [How to test local Rococo <-> Westend bridge](#how-to-test-local-rococo---westend-bridge) - [Run Rococo/Westend chains with zombienet](#run-rococowestend-chains-with-zombienet) - [Init bridge and run relayer between BridgeHubRococo and @@ -53,17 +44,7 @@ Copy the apropriate binary (zombienet-linux) from the latest release to ~/local_ --- # 2. Build polkadot binary -# If you want to test Kusama/Polkadot bridge, we need "sudo pallet + fast-runtime", -# so we need to use sudofi in polkadot directory. 
-# -# Install sudofi: (skip if already installed) -# cd -# git clone https://github.com/paritytech/parachain-utils.git -# cd parachain-utils # -> this is -# cargo build --release --bin sudofi -# -# cd /polkadot -# /target/release/sudofi +We need polkadot binary with "fast-runtime" feature: cd cargo build --release --features fast-runtime --bin polkadot @@ -100,112 +81,6 @@ cp target/release/polkadot-parachain ~/local_bridge_testing/bin/polkadot-paracha cp target/release/polkadot-parachain ~/local_bridge_testing/bin/polkadot-parachain-asset-hub ``` - -## How to test local Rococo <-> Wococo bridge - -### Run Rococo/Wococo chains with zombienet - -``` -cd - -# Rococo + BridgeHubRococo + AssetHub for Rococo (mirroring Kusama) -POLKADOT_BINARY_PATH=~/local_bridge_testing/bin/polkadot \ -POLKADOT_PARACHAIN_BINARY_PATH=~/local_bridge_testing/bin/polkadot-parachain \ -POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_ROCOCO=~/local_bridge_testing/bin/polkadot-parachain-asset-hub \ - ~/local_bridge_testing/bin/zombienet-linux --provider native spawn ./cumulus/zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml -``` - -``` -cd - -# Wococo + BridgeHubWococo + AssetHub for Wococo (mirroring Polkadot) -POLKADOT_BINARY_PATH=~/local_bridge_testing/bin/polkadot \ -POLKADOT_PARACHAIN_BINARY_PATH=~/local_bridge_testing/bin/polkadot-parachain \ -POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_WOCOCO=~/local_bridge_testing/bin/polkadot-parachain-asset-hub \ - ~/local_bridge_testing/bin/zombienet-linux --provider native spawn ./cumulus/zombienet/bridge-hubs/bridge_hub_wococo_local_network.toml -``` - -### Init bridge and run relayer between BridgeHubRococo and BridgeHubWococo - -**Accounts of BridgeHub parachains:** -- `Bob` is pallet owner of all bridge pallets - -#### Run with script -``` -cd - -./cumulus/scripts/bridges_rococo_wococo.sh run-relay -``` - -**Check relay-chain headers relaying:** -- Rococo parachain: - https://polkadot.js.org/apps/?rpc=ws%3A%2F%2F127.0.0.1%3A8943#/chainstate - Pallet: - **bridgeWococoGrandpa** - Keys: **bestFinalized()** -- Wococo parachain: - https://polkadot.js.org/apps/?rpc=ws%3A%2F%2F127.0.0.1%3A8945#/chainstate - Pallet: - **bridgeRococoGrandpa** - Keys: **bestFinalized()** - -**Check parachain headers relaying:** -- Rococo parachain: - https://polkadot.js.org/apps/?rpc=ws%3A%2F%2F127.0.0.1%3A8943#/chainstate - Pallet: - **bridgeWococoParachains** - Keys: **parasInfo(None)** -- Wococo parachain: - https://polkadot.js.org/apps/?rpc=ws%3A%2F%2F127.0.0.1%3A8945#/chainstate - Pallet: - **bridgeRococoParachains** - Keys: **parasInfo(None)** - -### Initialize configuration for transfer asset over bridge (ROCs/WOCs) - -This initialization does several things: -- creates `ForeignAssets` for wrappedROCs/wrappedWOCs -- drips SA for AssetHubRococo on AssetHubWococo (and vice versa) which holds reserved assets on source chains -``` -cd - -./cumulus/scripts/bridges_rococo_wococo.sh init-asset-hub-rococo-local -./cumulus/scripts/bridges_rococo_wococo.sh init-bridge-hub-rococo-local -./cumulus/scripts/bridges_rococo_wococo.sh init-asset-hub-wococo-local -./cumulus/scripts/bridges_rococo_wococo.sh init-bridge-hub-wococo-local -``` - -### Send messages - transfer asset over bridge (ROCs/WOCs) - -Do (asset) transfers: -``` -cd - -# ROCs from Rococo's Asset Hub to Wococo's. -./cumulus/scripts/bridges_rococo_wococo.sh reserve-transfer-assets-from-asset-hub-rococo-local -``` -``` -cd - -# WOCs from Wococo's Asset Hub to Rococo's. 
-./cumulus/scripts/bridges_rococo_wococo.sh reserve-transfer-assets-from-asset-hub-wococo-local -``` - -- open explorers: (see zombienets) - - AssetHubRococo (see events `xcmpQueue.XcmpMessageSent`, `polkadotXcm.Attempted`) https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:9910#/explorer - - BridgeHubRococo (see `bridgeWococoMessages.MessageAccepted`) https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:8943#/explorer - - BridgeHubWococo (see `bridgeRococoMessages.MessagesReceived`, `xcmpQueue.XcmpMessageSent`) https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:8945#/explorer - - AssetHubWococo (see `foreignAssets.Issued`, `xcmpQueue.Success`) https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:9010#/explorer - - BridgeHubRocococ (see `bridgeWococoMessages.MessagesDelivered`) https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:8943#/explorer - -### Claim relayer's rewards on BridgeHubRococo and BridgeHubWococo - -**Accounts of BridgeHub parachains:** -- `//Charlie` is relayer account on BridgeHubRococo -- `//Charlie` is relayer account on BridgeHubWococo - -``` -cd - -# Claim rewards on BridgeHubWococo: -./cumulus/scripts/bridges_rococo_wococo.sh claim-rewards-bridge-hub-rococo-local - -# Claim rewards on BridgeHubWococo: -./cumulus/scripts/bridges_rococo_wococo.sh claim-rewards-bridge-hub-wococo-local -``` - -- open explorers: (see zombienets) - - BridgeHubRococo (see 2x `bridgeRelayers.RewardPaid`) https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:8943#/explorer - - BridgeHubWococo (see 2x `bridgeRelayers.RewardPaid`) https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:8945#/explorer - ## How to test local Rococo <-> Westend bridge ### Run Rococo/Westend chains with zombienet diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index 671d38e808f..c475768d5dd 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -77,10 +77,8 @@ parachains-common = { path = "../../../common", default-features = false } # Bridges bp-asset-hub-rococo = { path = "../../../../../bridges/primitives/chain-asset-hub-rococo", default-features = false } bp-asset-hub-westend = { path = "../../../../../bridges/primitives/chain-asset-hub-westend", default-features = false } -bp-asset-hub-wococo = { path = "../../../../../bridges/primitives/chain-asset-hub-wococo", default-features = false } bp-bridge-hub-rococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-rococo", default-features = false } bp-bridge-hub-westend = { path = "../../../../../bridges/primitives/chain-bridge-hub-westend", default-features = false } -bp-bridge-hub-wococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-wococo", default-features = false } bp-header-chain = { path = "../../../../../bridges/primitives/header-chain", default-features = false } bp-messages = { path = "../../../../../bridges/primitives/messages", default-features = false } bp-parachains = { path = "../../../../../bridges/primitives/parachains", default-features = false } @@ -89,7 +87,6 @@ bp-relayers = { path = "../../../../../bridges/primitives/relayers", default-fea bp-runtime = { path = "../../../../../bridges/primitives/runtime", default-features = false } bp-rococo = { path = "../../../../../bridges/primitives/chain-rococo", default-features = false } bp-westend = { path = "../../../../../bridges/primitives/chain-westend", default-features = false } 
-bp-wococo = { path = "../../../../../bridges/primitives/chain-wococo", default-features = false } pallet-bridge-grandpa = { path = "../../../../../bridges/modules/grandpa", default-features = false } pallet-bridge-messages = { path = "../../../../../bridges/modules/messages", default-features = false } pallet-bridge-parachains = { path = "../../../../../bridges/modules/parachains", default-features = false } @@ -107,10 +104,8 @@ default = [ "std" ] std = [ "bp-asset-hub-rococo/std", "bp-asset-hub-westend/std", - "bp-asset-hub-wococo/std", "bp-bridge-hub-rococo/std", "bp-bridge-hub-westend/std", - "bp-bridge-hub-wococo/std", "bp-header-chain/std", "bp-messages/std", "bp-parachains/std", @@ -119,7 +114,6 @@ std = [ "bp-rococo/std", "bp-runtime/std", "bp-westend/std", - "bp-wococo/std", "bridge-runtime-common/std", "codec/std", "cumulus-pallet-aura-ext/std", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs index 296ec88a856..8153e52beac 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs @@ -29,10 +29,6 @@ parameter_types! { pub const RelayChainHeadersToKeep: u32 = 1024; pub const ParachainHeadsToKeep: u32 = 64; - pub const RococoBridgeParachainPalletName: &'static str = "Paras"; - pub const MaxRococoParaHeadDataSize: u32 = bp_rococo::MAX_NESTED_PARACHAIN_HEAD_DATA_SIZE; - pub const WococoBridgeParachainPalletName: &'static str = "Paras"; - pub const MaxWococoParaHeadDataSize: u32 = bp_wococo::MAX_NESTED_PARACHAIN_HEAD_DATA_SIZE; pub const WestendBridgeParachainPalletName: &'static str = "Paras"; pub const MaxWestendParaHeadDataSize: u32 = bp_westend::MAX_NESTED_PARACHAIN_HEAD_DATA_SIZE; @@ -43,52 +39,6 @@ parameter_types! { pub storage DeliveryRewardInBalance: u64 = 1_000_000; } -/// Add GRANDPA bridge pallet to track Wococo relay chain. -pub type BridgeGrandpaWococoInstance = pallet_bridge_grandpa::Instance1; -impl pallet_bridge_grandpa::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type BridgedChain = bp_wococo::Wococo; - type MaxFreeMandatoryHeadersPerBlock = ConstU32<4>; - type HeadersToKeep = RelayChainHeadersToKeep; - type WeightInfo = weights::pallet_bridge_grandpa_wococo_finality::WeightInfo; -} - -/// Add parachain bridge pallet to track Wococo BridgeHub parachain -pub type BridgeParachainWococoInstance = pallet_bridge_parachains::Instance1; -impl pallet_bridge_parachains::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = weights::pallet_bridge_parachains_within_wococo::WeightInfo; - type BridgesGrandpaPalletInstance = BridgeGrandpaWococoInstance; - type ParasPalletName = WococoBridgeParachainPalletName; - type ParaStoredHeaderDataBuilder = - SingleParaStoredHeaderDataBuilder; - type HeadsToKeep = ParachainHeadsToKeep; - type MaxParaHeadDataSize = MaxWococoParaHeadDataSize; -} - -/// Add GRANDPA bridge pallet to track Rococo relay chain. 
-pub type BridgeGrandpaRococoInstance = pallet_bridge_grandpa::Instance2; -impl pallet_bridge_grandpa::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type BridgedChain = bp_rococo::Rococo; - type MaxFreeMandatoryHeadersPerBlock = ConstU32<4>; - type HeadersToKeep = RelayChainHeadersToKeep; - type WeightInfo = weights::pallet_bridge_grandpa_rococo_finality::WeightInfo; -} - -/// Add parachain bridge pallet to track Rococo BridgeHub parachain -pub type BridgeParachainRococoInstance = pallet_bridge_parachains::Instance2; -impl pallet_bridge_parachains::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = weights::pallet_bridge_parachains_within_rococo::WeightInfo; - type BridgesGrandpaPalletInstance = BridgeGrandpaRococoInstance; - type ParasPalletName = RococoBridgeParachainPalletName; - type ParaStoredHeaderDataBuilder = - SingleParaStoredHeaderDataBuilder; - type HeadsToKeep = ParachainHeadsToKeep; - type MaxParaHeadDataSize = MaxRococoParaHeadDataSize; -} - /// Add GRANDPA bridge pallet to track Westend relay chain. pub type BridgeGrandpaWestendInstance = pallet_bridge_grandpa::Instance3; impl pallet_bridge_grandpa::Config for Runtime { @@ -96,14 +46,14 @@ impl pallet_bridge_grandpa::Config for Runtime { type BridgedChain = bp_westend::Westend; type MaxFreeMandatoryHeadersPerBlock = ConstU32<4>; type HeadersToKeep = RelayChainHeadersToKeep; - type WeightInfo = weights::pallet_bridge_grandpa_westend_finality::WeightInfo; + type WeightInfo = weights::pallet_bridge_grandpa::WeightInfo; } /// Add parachain bridge pallet to track Westend BridgeHub parachain pub type BridgeParachainWestendInstance = pallet_bridge_parachains::Instance3; impl pallet_bridge_parachains::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type WeightInfo = weights::pallet_bridge_parachains_within_westend::WeightInfo; + type WeightInfo = weights::pallet_bridge_parachains::WeightInfo; type BridgesGrandpaPalletInstance = BridgeGrandpaWestendInstance; type ParasPalletName = WestendBridgeParachainPalletName; type ParaStoredHeaderDataBuilder = diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_rococo_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_rococo_config.rs deleted file mode 100644 index 35497c84068..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_rococo_config.rs +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Bridge definitions used on BridgeHub with the Wococo flavor for bridging to BridgeHubRococo. 
- -use crate::{ - bridge_common_config::{BridgeParachainRococoInstance, DeliveryRewardInBalance}, - weights, AccountId, BridgeRococoMessages, ParachainInfo, Runtime, RuntimeEvent, RuntimeOrigin, - XcmRouter, -}; -use bp_messages::LaneId; -use bridge_runtime_common::{ - messages, - messages::{ - source::{FromBridgedChainMessagesDeliveryProof, TargetHeaderChainAdapter}, - target::{FromBridgedChainMessagesProof, SourceHeaderChainAdapter}, - MessageBridge, ThisChainWithMessages, UnderlyingChainProvider, - }, - messages_xcm_extension::{ - SenderAndLane, XcmAsPlainPayload, XcmBlobHauler, XcmBlobHaulerAdapter, - XcmBlobMessageDispatch, - }, - refund_relayer_extension::{ - ActualFeeRefund, RefundBridgedParachainMessages, RefundSignedExtensionAdapter, - RefundableMessagesLane, RefundableParachain, - }, -}; -use codec::Encode; -use frame_support::{parameter_types, traits::PalletInfoAccess}; -use sp_runtime::RuntimeDebug; -use xcm::{ - latest::prelude::*, - prelude::{InteriorMultiLocation, NetworkId}, -}; -use xcm_builder::{BridgeBlobDispatcher, HaulBlobExporter}; - -parameter_types! { - pub const MaxUnrewardedRelayerEntriesAtInboundLane: bp_messages::MessageNonce = - bp_bridge_hub_wococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; - pub const MaxUnconfirmedMessagesAtInboundLane: bp_messages::MessageNonce = - bp_bridge_hub_wococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; - pub const BridgeHubRococoChainId: bp_runtime::ChainId = bp_runtime::BRIDGE_HUB_ROCOCO_CHAIN_ID; - pub BridgeHubWococoUniversalLocation: InteriorMultiLocation = X2(GlobalConsensus(Wococo), Parachain(ParachainInfo::parachain_id().into())); - pub BridgeWococoToRococoMessagesPalletInstance: InteriorMultiLocation = X1(PalletInstance(::index() as u8)); - pub RococoGlobalConsensusNetwork: NetworkId = NetworkId::Rococo; - pub ActiveOutboundLanesToBridgeHubRococo: &'static [bp_messages::LaneId] = &[XCM_LANE_FOR_ASSET_HUB_WOCOCO_TO_ASSET_HUB_ROCOCO]; - pub const AssetHubWococoToAssetHubRococoMessagesLane: bp_messages::LaneId = XCM_LANE_FOR_ASSET_HUB_WOCOCO_TO_ASSET_HUB_ROCOCO; - // see the `FEE_BOOST_PER_MESSAGE` constant to get the meaning of this value - pub PriorityBoostPerMessage: u64 = 182_044_444_444_444; - - pub AssetHubWococoParaId: cumulus_primitives_core::ParaId = bp_asset_hub_wococo::ASSET_HUB_WOCOCO_PARACHAIN_ID.into(); - pub AssetHubRococoParaId: cumulus_primitives_core::ParaId = bp_asset_hub_rococo::ASSET_HUB_ROCOCO_PARACHAIN_ID.into(); - - pub FromAssetHubWococoToAssetHubRococoRoute: SenderAndLane = SenderAndLane::new( - ParentThen(X1(Parachain(AssetHubWococoParaId::get().into()))).into(), - XCM_LANE_FOR_ASSET_HUB_WOCOCO_TO_ASSET_HUB_ROCOCO, - ); - - pub CongestedMessage: Xcm<()> = build_congestion_message(true).into(); - - pub UncongestedMessage: Xcm<()> = build_congestion_message(false).into(); -} -pub const XCM_LANE_FOR_ASSET_HUB_WOCOCO_TO_ASSET_HUB_ROCOCO: LaneId = LaneId([0, 0, 0, 1]); - -fn build_congestion_message(is_congested: bool) -> sp_std::vec::Vec> { - sp_std::vec![ - UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - Transact { - origin_kind: OriginKind::Xcm, - require_weight_at_most: - bp_asset_hub_wococo::XcmBridgeHubRouterTransactCallMaxWeight::get(), - call: bp_asset_hub_wococo::Call::ToRococoXcmRouter( - bp_asset_hub_wococo::XcmBridgeHubRouterCall::report_bridge_status { - bridge_id: Default::default(), - is_congested, - } - ) - .encode() - .into(), - } - ] -} - -/// Proof of messages, coming from Rococo. 
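For clarity on the `Transact { call }` payloads assembled by `build_congestion_message` above, here is a small, self-contained sketch of how such a call is SCALE-encoded into the bytes placed in the XCM `Transact` instruction. The enums, field types, and pallet/variant indices below are stand-ins for the real `bp_asset_hub_wococo::Call` / `XcmBridgeHubRouterCall` definitions (the workspace usually imports `parity-scale-codec` under the alias `codec`), not the actual items:

```rust
// Illustrative sketch only; types and indices are hypothetical stand-ins.
use parity_scale_codec::Encode;

#[derive(Encode)]
enum XcmBridgeHubRouterCall {
    #[codec(index = 0)]
    ReportBridgeStatus { bridge_id: [u8; 32], is_congested: bool },
}

#[derive(Encode)]
enum RuntimeCall {
    #[codec(index = 51)] // hypothetical pallet index for the router pallet
    ToRococoXcmRouter(XcmBridgeHubRouterCall),
}

fn main() {
    let call = RuntimeCall::ToRococoXcmRouter(XcmBridgeHubRouterCall::ReportBridgeStatus {
        bridge_id: [0u8; 32], // stand-in for `Default::default()` of the real id type
        is_congested: true,
    });
    // These bytes are what ends up in the `Transact { call, .. }` field.
    println!("{:02x?}", call.encode());
}
```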
-pub type FromRococoBridgeHubMessagesProof = - FromBridgedChainMessagesProof; -/// Messages delivery proof for RococoBridge Hub -> Wococo BridgeHub messages. -pub type ToRococoBridgeHubMessagesDeliveryProof = - FromBridgedChainMessagesDeliveryProof; - -/// Dispatches received XCM messages from other bridge -type FromRococoMessageBlobDispatcher = BridgeBlobDispatcher< - XcmRouter, - BridgeHubWococoUniversalLocation, - BridgeWococoToRococoMessagesPalletInstance, ->; - -/// Export XCM messages to be relayed to the other side -pub type ToBridgeHubRococoHaulBlobExporter = HaulBlobExporter< - XcmBlobHaulerAdapter, - RococoGlobalConsensusNetwork, - (), ->; -pub struct ToBridgeHubRococoXcmBlobHauler; -impl XcmBlobHauler for ToBridgeHubRococoXcmBlobHauler { - type Runtime = Runtime; - type MessagesInstance = WithBridgeHubRococoMessagesInstance; - type SenderAndLane = FromAssetHubWococoToAssetHubRococoRoute; - - type ToSourceChainSender = XcmRouter; - type CongestedMessage = CongestedMessage; - type UncongestedMessage = UncongestedMessage; -} - -/// On messages delivered callback. -type OnMessagesDelivered = XcmBlobHaulerAdapter; - -/// Messaging Bridge configuration for BridgeHubWococo -> BridgeHubRococo -pub struct WithBridgeHubRococoMessageBridge; -impl MessageBridge for WithBridgeHubRococoMessageBridge { - const BRIDGED_MESSAGES_PALLET_NAME: &'static str = - bp_bridge_hub_wococo::WITH_BRIDGE_HUB_WOCOCO_MESSAGES_PALLET_NAME; - type ThisChain = BridgeHubWococo; - type BridgedChain = BridgeHubRococo; - type BridgedHeaderChain = pallet_bridge_parachains::ParachainHeaders< - Runtime, - BridgeParachainRococoInstance, - bp_bridge_hub_rococo::BridgeHubRococo, - >; -} - -/// Message verifier for BridgeHubRococo messages sent from BridgeHubWococo -pub type ToBridgeHubRococoMessageVerifier = - messages::source::FromThisChainMessageVerifier; - -/// Maximal outbound payload size of BridgeHubWococo -> BridgeHubRococo messages. -pub type ToBridgeHubRococoMaximalOutboundPayloadSize = - messages::source::FromThisChainMaximalOutboundPayloadSize; - -/// BridgeHubRococo chain from message lane point of view. -#[derive(RuntimeDebug, Clone, Copy)] -pub struct BridgeHubRococo; - -impl UnderlyingChainProvider for BridgeHubRococo { - type Chain = bp_bridge_hub_rococo::BridgeHubRococo; -} - -impl messages::BridgedChainWithMessages for BridgeHubRococo {} - -/// BridgeHubWococo chain from message lane point of view. -#[derive(RuntimeDebug, Clone, Copy)] -pub struct BridgeHubWococo; - -impl UnderlyingChainProvider for BridgeHubWococo { - type Chain = bp_bridge_hub_wococo::BridgeHubWococo; -} - -impl ThisChainWithMessages for BridgeHubWococo { - type RuntimeOrigin = RuntimeOrigin; -} - -/// Signed extension that refunds relayers that are delivering messages from the Rococo parachain. 
-pub type OnBridgeHubWococoRefundBridgeHubRococoMessages = RefundSignedExtensionAdapter< - RefundBridgedParachainMessages< - Runtime, - RefundableParachain, - RefundableMessagesLane< - WithBridgeHubRococoMessagesInstance, - AssetHubWococoToAssetHubRococoMessagesLane, - >, - ActualFeeRefund, - PriorityBoostPerMessage, - StrOnBridgeHubWococoRefundBridgeHubRococoMessages, - >, ->; -bp_runtime::generate_static_str_provider!(OnBridgeHubWococoRefundBridgeHubRococoMessages); - -/// Add XCM messages support for BridgeHubWococo to support Wococo->Rococo XCM messages -pub type WithBridgeHubRococoMessagesInstance = pallet_bridge_messages::Instance2; -impl pallet_bridge_messages::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = weights::pallet_bridge_messages_wococo_to_rococo::WeightInfo; - type BridgedChainId = BridgeHubRococoChainId; - type ActiveOutboundLanes = ActiveOutboundLanesToBridgeHubRococo; - type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; - type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; - - type MaximalOutboundPayloadSize = ToBridgeHubRococoMaximalOutboundPayloadSize; - type OutboundPayload = XcmAsPlainPayload; - - type InboundPayload = XcmAsPlainPayload; - type InboundRelayer = AccountId; - type DeliveryPayments = (); - - type TargetHeaderChain = TargetHeaderChainAdapter; - type LaneMessageVerifier = ToBridgeHubRococoMessageVerifier; - type DeliveryConfirmationPayments = pallet_bridge_relayers::DeliveryConfirmationPaymentsAdapter< - Runtime, - WithBridgeHubRococoMessagesInstance, - DeliveryRewardInBalance, - >; - - type SourceHeaderChain = SourceHeaderChainAdapter; - type MessageDispatch = XcmBlobMessageDispatch< - FromRococoMessageBlobDispatcher, - Self::WeightInfo, - cumulus_pallet_xcmp_queue::bridging::OutXcmpChannelStatusProvider< - AssetHubWococoParaId, - Runtime, - >, - >; - type OnMessagesDelivered = OnMessagesDelivered; -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::bridge_common_config::BridgeGrandpaRococoInstance; - use bridge_runtime_common::{ - assert_complete_bridge_types, - integrity::{ - assert_complete_bridge_constants, check_message_lane_weights, - AssertBridgeMessagesPalletConstants, AssertBridgePalletNames, AssertChainConstants, - AssertCompleteBridgeConstants, - }, - }; - use parachains_common::{wococo, Balance}; - - /// Every additional message in the message delivery transaction boosts its priority. - /// So the priority of transaction with `N+1` messages is larger than priority of - /// transaction with `N` messages by the `PriorityBoostPerMessage`. - /// - /// Economically, it is an equivalent of adding tip to the transaction with `N` messages. - /// The `FEE_BOOST_PER_MESSAGE` constant is the value of this tip. - /// - /// We want this tip to be large enough (delivery transactions with more messages = less - /// operational costs and a faster bridge), so this value should be significant. 
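The doc comment above describes a linear relationship between message count and delivery-transaction priority, with `FEE_BOOST_PER_MESSAGE` as the tip-equivalent per message. A plain-integer sketch of that relationship, assuming the boost applies once per message beyond the first (the actual computation lives in `bridge_runtime_common`'s priority calculator and is not reproduced here):

```rust
// Rough sketch of the priority-boost relationship; not the real implementation.
fn main() {
    const PRIORITY_BOOST_PER_MESSAGE: u64 = 182_044_444_444_444;

    // Extra priority gained by a delivery transaction carrying `n` messages,
    // relative to a single-message delivery (assumed linear model).
    let boost = |n: u64| n.saturating_sub(1).saturating_mul(PRIORITY_BOOST_PER_MESSAGE);

    // A delivery with one extra message gains exactly one boost unit.
    assert_eq!(boost(2) - boost(1), PRIORITY_BOOST_PER_MESSAGE);
    println!("extra priority for a 10-message delivery: {}", boost(10));
}
```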
- const FEE_BOOST_PER_MESSAGE: Balance = 2 * wococo::currency::UNITS; - - #[test] - fn ensure_bridge_hub_wococo_message_lane_weights_are_correct() { - check_message_lane_weights::< - bp_bridge_hub_wococo::BridgeHubWococo, - Runtime, - WithBridgeHubRococoMessagesInstance, - >( - bp_bridge_hub_rococo::EXTRA_STORAGE_PROOF_SIZE, - bp_bridge_hub_wococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, - bp_bridge_hub_wococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, - true, - ); - } - - #[test] - fn ensure_bridge_integrity() { - assert_complete_bridge_types!( - runtime: Runtime, - with_bridged_chain_grandpa_instance: BridgeGrandpaRococoInstance, - with_bridged_chain_messages_instance: WithBridgeHubRococoMessagesInstance, - bridge: WithBridgeHubRococoMessageBridge, - this_chain: bp_wococo::Wococo, - bridged_chain: bp_rococo::Rococo, - ); - - assert_complete_bridge_constants::< - Runtime, - BridgeGrandpaRococoInstance, - WithBridgeHubRococoMessagesInstance, - WithBridgeHubRococoMessageBridge, - >(AssertCompleteBridgeConstants { - this_chain_constants: AssertChainConstants { - block_length: bp_bridge_hub_wococo::BlockLength::get(), - block_weights: bp_bridge_hub_wococo::BlockWeights::get(), - }, - messages_pallet_constants: AssertBridgeMessagesPalletConstants { - max_unrewarded_relayers_in_bridged_confirmation_tx: - bp_bridge_hub_rococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, - max_unconfirmed_messages_in_bridged_confirmation_tx: - bp_bridge_hub_rococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, - bridged_chain_id: bp_runtime::BRIDGE_HUB_ROCOCO_CHAIN_ID, - }, - pallet_names: AssertBridgePalletNames { - with_this_chain_messages_pallet_name: - bp_bridge_hub_wococo::WITH_BRIDGE_HUB_WOCOCO_MESSAGES_PALLET_NAME, - with_bridged_chain_grandpa_pallet_name: bp_rococo::WITH_ROCOCO_GRANDPA_PALLET_NAME, - with_bridged_chain_messages_pallet_name: - bp_bridge_hub_rococo::WITH_BRIDGE_HUB_ROCOCO_MESSAGES_PALLET_NAME, - }, - }); - - bridge_runtime_common::priority_calculator::ensure_priority_boost_is_sane::< - Runtime, - WithBridgeHubRococoMessagesInstance, - PriorityBoostPerMessage, - >(FEE_BOOST_PER_MESSAGE); - - assert_eq!( - BridgeWococoToRococoMessagesPalletInstance::get(), - X1(PalletInstance( - bp_bridge_hub_wococo::WITH_BRIDGE_WOCOCO_TO_ROCOCO_MESSAGES_PALLET_INDEX - )) - ); - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs index 36dcab09dea..f3c1c9597b5 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs @@ -196,7 +196,7 @@ bp_runtime::generate_static_str_provider!(OnBridgeHubRococoRefundBridgeHubWesten pub type WithBridgeHubWestendMessagesInstance = pallet_bridge_messages::Instance3; impl pallet_bridge_messages::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type WeightInfo = weights::pallet_bridge_messages_rococo_to_westend::WeightInfo; + type WeightInfo = weights::pallet_bridge_messages::WeightInfo; type BridgedChainId = BridgeHubWestendChainId; type ActiveOutboundLanes = ActiveOutboundLanesToBridgeHubWestend; type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_wococo_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_wococo_config.rs 
deleted file mode 100644 index 7780b02632c..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_wococo_config.rs +++ /dev/null @@ -1,318 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Bridge definitions used on BridgeHub with the Rococo flavor for bridging to BridgeHubWococo. - -use crate::{ - bridge_common_config::{BridgeParachainWococoInstance, DeliveryRewardInBalance}, - weights, AccountId, BridgeWococoMessages, ParachainInfo, Runtime, RuntimeEvent, RuntimeOrigin, - XcmRouter, -}; -use bp_messages::LaneId; -use bridge_runtime_common::{ - messages, - messages::{ - source::{FromBridgedChainMessagesDeliveryProof, TargetHeaderChainAdapter}, - target::{FromBridgedChainMessagesProof, SourceHeaderChainAdapter}, - MessageBridge, ThisChainWithMessages, UnderlyingChainProvider, - }, - messages_xcm_extension::{ - SenderAndLane, XcmAsPlainPayload, XcmBlobHauler, XcmBlobHaulerAdapter, - XcmBlobMessageDispatch, - }, - refund_relayer_extension::{ - ActualFeeRefund, RefundBridgedParachainMessages, RefundSignedExtensionAdapter, - RefundableMessagesLane, RefundableParachain, - }, -}; - -use codec::Encode; -use frame_support::{parameter_types, traits::PalletInfoAccess}; -use sp_runtime::RuntimeDebug; -use xcm::{ - latest::prelude::*, - prelude::{InteriorMultiLocation, NetworkId}, -}; -use xcm_builder::{BridgeBlobDispatcher, HaulBlobExporter}; - -parameter_types! 
{ - pub const MaxUnrewardedRelayerEntriesAtInboundLane: bp_messages::MessageNonce = - bp_bridge_hub_rococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; - pub const MaxUnconfirmedMessagesAtInboundLane: bp_messages::MessageNonce = - bp_bridge_hub_rococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; - pub const BridgeHubWococoChainId: bp_runtime::ChainId = bp_runtime::BRIDGE_HUB_WOCOCO_CHAIN_ID; - pub BridgeRococoToWococoMessagesPalletInstance: InteriorMultiLocation = X1(PalletInstance(::index() as u8)); - pub BridgeHubRococoUniversalLocation: InteriorMultiLocation = X2(GlobalConsensus(Rococo), Parachain(ParachainInfo::parachain_id().into())); - pub WococoGlobalConsensusNetwork: NetworkId = NetworkId::Wococo; - pub ActiveOutboundLanesToBridgeHubWococo: &'static [bp_messages::LaneId] = &[XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WOCOCO]; - pub const AssetHubRococoToAssetHubWococoMessagesLane: bp_messages::LaneId = XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WOCOCO; - // see the `FEE_BOOST_PER_MESSAGE` constant to get the meaning of this value - pub PriorityBoostPerMessage: u64 = 182_044_444_444_444; - - pub AssetHubRococoParaId: cumulus_primitives_core::ParaId = bp_asset_hub_rococo::ASSET_HUB_ROCOCO_PARACHAIN_ID.into(); - pub AssetHubWococoParaId: cumulus_primitives_core::ParaId = bp_asset_hub_wococo::ASSET_HUB_WOCOCO_PARACHAIN_ID.into(); - - pub FromAssetHubRococoToAssetHubWococoRoute: SenderAndLane = SenderAndLane::new( - ParentThen(X1(Parachain(AssetHubRococoParaId::get().into()))).into(), - XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WOCOCO, - ); - - pub CongestedMessage: Xcm<()> = build_congestion_message(true).into(); - - pub UncongestedMessage: Xcm<()> = build_congestion_message(false).into(); -} -pub const XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WOCOCO: LaneId = LaneId([0, 0, 0, 1]); - -fn build_congestion_message(is_congested: bool) -> sp_std::vec::Vec> { - sp_std::vec![ - UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - Transact { - origin_kind: OriginKind::Xcm, - require_weight_at_most: - bp_asset_hub_rococo::XcmBridgeHubRouterTransactCallMaxWeight::get(), - call: bp_asset_hub_rococo::Call::ToWococoXcmRouter( - bp_asset_hub_rococo::XcmBridgeHubRouterCall::report_bridge_status { - bridge_id: Default::default(), - is_congested, - } - ) - .encode() - .into(), - } - ] -} - -/// Proof of messages, coming from Wococo. -pub type FromWococoBridgeHubMessagesProof = - FromBridgedChainMessagesProof; -/// Messages delivery proof for Rococo Bridge Hub -> Wococo Bridge Hub messages. -pub type ToWococoBridgeHubMessagesDeliveryProof = - FromBridgedChainMessagesDeliveryProof; - -/// Dispatches received XCM messages from other bridge -type FromWococoMessageBlobDispatcher = BridgeBlobDispatcher< - XcmRouter, - BridgeHubRococoUniversalLocation, - BridgeRococoToWococoMessagesPalletInstance, ->; - -/// Export XCM messages to be relayed to the other side -pub type ToBridgeHubWococoHaulBlobExporter = HaulBlobExporter< - XcmBlobHaulerAdapter, - WococoGlobalConsensusNetwork, - (), ->; -pub struct ToBridgeHubWococoXcmBlobHauler; -impl XcmBlobHauler for ToBridgeHubWococoXcmBlobHauler { - type Runtime = Runtime; - type MessagesInstance = WithBridgeHubWococoMessagesInstance; - type SenderAndLane = FromAssetHubRococoToAssetHubWococoRoute; - - type ToSourceChainSender = XcmRouter; - type CongestedMessage = CongestedMessage; - type UncongestedMessage = UncongestedMessage; -} - -/// On messages delivered callback. 
-type OnMessagesDeliveredFromWococo = XcmBlobHaulerAdapter; - -/// Messaging Bridge configuration for BridgeHubRococo -> BridgeHubWococo -pub struct WithBridgeHubWococoMessageBridge; -impl MessageBridge for WithBridgeHubWococoMessageBridge { - const BRIDGED_MESSAGES_PALLET_NAME: &'static str = - bp_bridge_hub_rococo::WITH_BRIDGE_HUB_ROCOCO_MESSAGES_PALLET_NAME; - type ThisChain = BridgeHubRococo; - type BridgedChain = BridgeHubWococo; - type BridgedHeaderChain = pallet_bridge_parachains::ParachainHeaders< - Runtime, - BridgeParachainWococoInstance, - bp_bridge_hub_wococo::BridgeHubWococo, - >; -} - -/// Message verifier for BridgeHubWococo messages sent from BridgeHubRococo -pub type ToBridgeHubWococoMessageVerifier = - messages::source::FromThisChainMessageVerifier; - -/// Maximal outbound payload size of BridgeHubRococo -> BridgeHubWococo messages. -pub type ToBridgeHubWococoMaximalOutboundPayloadSize = - messages::source::FromThisChainMaximalOutboundPayloadSize; - -/// BridgeHubWococo chain from message lane point of view. -#[derive(RuntimeDebug, Clone, Copy)] -pub struct BridgeHubWococo; - -impl UnderlyingChainProvider for BridgeHubWococo { - type Chain = bp_bridge_hub_wococo::BridgeHubWococo; -} - -impl messages::BridgedChainWithMessages for BridgeHubWococo {} - -/// BridgeHubRococo chain from message lane point of view. -#[derive(RuntimeDebug, Clone, Copy)] -pub struct BridgeHubRococo; - -impl UnderlyingChainProvider for BridgeHubRococo { - type Chain = bp_bridge_hub_rococo::BridgeHubRococo; -} - -impl ThisChainWithMessages for BridgeHubRococo { - type RuntimeOrigin = RuntimeOrigin; -} - -/// Signed extension that refunds relayers that are delivering messages from the Wococo parachain. -pub type OnBridgeHubRococoRefundBridgeHubWococoMessages = RefundSignedExtensionAdapter< - RefundBridgedParachainMessages< - Runtime, - RefundableParachain, - RefundableMessagesLane< - WithBridgeHubWococoMessagesInstance, - AssetHubRococoToAssetHubWococoMessagesLane, - >, - ActualFeeRefund, - PriorityBoostPerMessage, - StrOnBridgeHubRococoRefundBridgeHubWococoMessages, - >, ->; -bp_runtime::generate_static_str_provider!(OnBridgeHubRococoRefundBridgeHubWococoMessages); - -/// Add XCM messages support for BridgeHubRococo to support Rococo->Wococo XCM messages -pub type WithBridgeHubWococoMessagesInstance = pallet_bridge_messages::Instance1; -impl pallet_bridge_messages::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = weights::pallet_bridge_messages_rococo_to_wococo::WeightInfo; - type BridgedChainId = BridgeHubWococoChainId; - type ActiveOutboundLanes = ActiveOutboundLanesToBridgeHubWococo; - type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; - type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; - - type MaximalOutboundPayloadSize = ToBridgeHubWococoMaximalOutboundPayloadSize; - type OutboundPayload = XcmAsPlainPayload; - - type InboundPayload = XcmAsPlainPayload; - type InboundRelayer = AccountId; - type DeliveryPayments = (); - - type TargetHeaderChain = TargetHeaderChainAdapter; - type LaneMessageVerifier = ToBridgeHubWococoMessageVerifier; - type DeliveryConfirmationPayments = pallet_bridge_relayers::DeliveryConfirmationPaymentsAdapter< - Runtime, - WithBridgeHubWococoMessagesInstance, - DeliveryRewardInBalance, - >; - - type SourceHeaderChain = SourceHeaderChainAdapter; - type MessageDispatch = XcmBlobMessageDispatch< - FromWococoMessageBlobDispatcher, - Self::WeightInfo, - 
cumulus_pallet_xcmp_queue::bridging::OutXcmpChannelStatusProvider< - AssetHubRococoParaId, - Runtime, - >, - >; - type OnMessagesDelivered = OnMessagesDeliveredFromWococo; -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::bridge_common_config::BridgeGrandpaWococoInstance; - use bridge_runtime_common::{ - assert_complete_bridge_types, - integrity::{ - assert_complete_bridge_constants, check_message_lane_weights, - AssertBridgeMessagesPalletConstants, AssertBridgePalletNames, AssertChainConstants, - AssertCompleteBridgeConstants, - }, - }; - use parachains_common::{rococo, Balance}; - - /// Every additional message in the message delivery transaction boosts its priority. - /// So the priority of transaction with `N+1` messages is larger than priority of - /// transaction with `N` messages by the `PriorityBoostPerMessage`. - /// - /// Economically, it is an equivalent of adding tip to the transaction with `N` messages. - /// The `FEE_BOOST_PER_MESSAGE` constant is the value of this tip. - /// - /// We want this tip to be large enough (delivery transactions with more messages = less - /// operational costs and a faster bridge), so this value should be significant. - const FEE_BOOST_PER_MESSAGE: Balance = 2 * rococo::currency::UNITS; - - #[test] - fn ensure_bridge_hub_rococo_message_lane_weights_are_correct() { - check_message_lane_weights::< - bp_bridge_hub_rococo::BridgeHubRococo, - Runtime, - WithBridgeHubWococoMessagesInstance, - >( - bp_bridge_hub_wococo::EXTRA_STORAGE_PROOF_SIZE, - bp_bridge_hub_rococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, - bp_bridge_hub_rococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, - true, - ); - } - - #[test] - fn ensure_bridge_integrity() { - assert_complete_bridge_types!( - runtime: Runtime, - with_bridged_chain_grandpa_instance: BridgeGrandpaWococoInstance, - with_bridged_chain_messages_instance: WithBridgeHubWococoMessagesInstance, - bridge: WithBridgeHubWococoMessageBridge, - this_chain: bp_rococo::Rococo, - bridged_chain: bp_wococo::Wococo, - ); - - assert_complete_bridge_constants::< - Runtime, - BridgeGrandpaWococoInstance, - WithBridgeHubWococoMessagesInstance, - WithBridgeHubWococoMessageBridge, - >(AssertCompleteBridgeConstants { - this_chain_constants: AssertChainConstants { - block_length: bp_bridge_hub_rococo::BlockLength::get(), - block_weights: bp_bridge_hub_rococo::BlockWeights::get(), - }, - messages_pallet_constants: AssertBridgeMessagesPalletConstants { - max_unrewarded_relayers_in_bridged_confirmation_tx: - bp_bridge_hub_wococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, - max_unconfirmed_messages_in_bridged_confirmation_tx: - bp_bridge_hub_wococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, - bridged_chain_id: bp_runtime::BRIDGE_HUB_WOCOCO_CHAIN_ID, - }, - pallet_names: AssertBridgePalletNames { - with_this_chain_messages_pallet_name: - bp_bridge_hub_rococo::WITH_BRIDGE_HUB_ROCOCO_MESSAGES_PALLET_NAME, - with_bridged_chain_grandpa_pallet_name: bp_wococo::WITH_WOCOCO_GRANDPA_PALLET_NAME, - with_bridged_chain_messages_pallet_name: - bp_bridge_hub_wococo::WITH_BRIDGE_HUB_WOCOCO_MESSAGES_PALLET_NAME, - }, - }); - - bridge_runtime_common::priority_calculator::ensure_priority_boost_is_sane::< - Runtime, - WithBridgeHubWococoMessagesInstance, - PriorityBoostPerMessage, - >(FEE_BOOST_PER_MESSAGE); - - assert_eq!( - BridgeRococoToWococoMessagesPalletInstance::get(), - X1(PalletInstance( - bp_bridge_hub_rococo::WITH_BRIDGE_ROCOCO_TO_WOCOCO_MESSAGES_PALLET_INDEX - )) - ); - } -} diff --git 
a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index b17d308b891..b8fc2fffc88 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -16,14 +16,7 @@ //! # Bridge Hub Rococo Runtime //! -//! This runtime is also used for Bridge Hub Wococo. We dont want to create -//! another exact copy of Bridge Hub Rococo, so we injected some tweaks backed by `RuntimeFlavor` -//! and `pub storage Flavor: RuntimeFlavor`. (For example this is needed for successful asset -//! transfer between Asset Hub Rococo and Asset Hub Wococo, where we need to have correct -//! `xcm_config::UniversalLocation` with correct `GlobalConsensus`. -//! //! This runtime currently supports bridging between: -//! - Rococo <> Wococo //! - Rococo <> Westend #![cfg_attr(not(feature = "std"), no_std)] @@ -35,13 +28,10 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); pub mod bridge_common_config; -pub mod bridge_to_rococo_config; pub mod bridge_to_westend_config; -pub mod bridge_to_wococo_config; mod weights; pub mod xcm_config; -use codec::{Decode, Encode}; use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; @@ -93,15 +83,6 @@ use parachains_common::{ HOURS, MAXIMUM_BLOCK_WEIGHT, NORMAL_DISPATCH_RATIO, SLOT_DURATION, }; -/// Enum for handling differences in the runtime configuration for BridgeHubRococo vs -/// BridgeHubWococo. -#[derive(Default, Eq, PartialEq, Debug, Clone, Copy, Decode, Encode)] -pub enum RuntimeFlavor { - #[default] - Rococo, - Wococo, -} - /// The address format for describing accounts. pub type Address = MultiAddress; @@ -125,11 +106,7 @@ pub type SignedExtra = ( frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, BridgeRejectObsoleteHeadersAndMessages, - ( - bridge_to_wococo_config::OnBridgeHubRococoRefundBridgeHubWococoMessages, - bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages, - bridge_to_rococo_config::OnBridgeHubWococoRefundBridgeHubRococoMessages, - ), + (bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages,), ); /// Unchecked extrinsic type as expected by this runtime. @@ -529,37 +506,19 @@ construct_runtime!( Utility: pallet_utility::{Pallet, Call, Event} = 40, Multisig: pallet_multisig::{Pallet, Call, Storage, Event} = 36, - // Rococo, Wococo and Westend BridgeHubs are sharing the runtime, so this runtime has several sets of - // bridge pallets. - // // BridgeHubRococo uses: - // - BridgeWococoGrandpa // - BridgeWestendGrandpa - // - BridgeWococoParachains // - BridgeWestendParachains - // - BridgeWococoMessages // - BridgeWestendMessages // - BridgeRelayers - // - // BridgeHubWococo uses: - // - BridgeRococoGrandpa - // - BridgeRococoParachains - // - BridgeRococoMessages - // - BridgeRelayers // GRANDPA bridge modules. - BridgeWococoGrandpa: pallet_bridge_grandpa::::{Pallet, Call, Storage, Event, Config} = 41, - BridgeRococoGrandpa: pallet_bridge_grandpa::::{Pallet, Call, Storage, Event, Config} = 43, BridgeWestendGrandpa: pallet_bridge_grandpa::::{Pallet, Call, Storage, Event, Config} = 48, // Parachain bridge modules. 
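		// Note: the remaining bridge pallets keep their explicit indices (BridgeRelayers = 47,
		// BridgeWestendGrandpa = 48, BridgeWestendParachains = 49, BridgeWestendMessages = 51),
		// so their on-chain pallet indices, and with them call/event encodings, are unaffected
		// by the removal of the Wococo/Rococo-instance entries (indices 41 through 46).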
- BridgeWococoParachains: pallet_bridge_parachains::::{Pallet, Call, Storage, Event} = 42, - BridgeRococoParachains: pallet_bridge_parachains::::{Pallet, Call, Storage, Event} = 44, BridgeWestendParachains: pallet_bridge_parachains::::{Pallet, Call, Storage, Event} = 49, // Messaging bridge modules. - BridgeWococoMessages: pallet_bridge_messages::::{Pallet, Call, Storage, Event, Config} = 46, - BridgeRococoMessages: pallet_bridge_messages::::{Pallet, Call, Storage, Event, Config} = 45, BridgeWestendMessages: pallet_bridge_messages::::{Pallet, Call, Storage, Event, Config} = 51, BridgeRelayers: pallet_bridge_relayers::{Pallet, Call, Storage, Event} = 47, @@ -573,11 +532,11 @@ construct_runtime!( bridge_runtime_common::generate_bridge_reject_obsolete_headers_and_messages! { RuntimeCall, AccountId, // Grandpa - BridgeRococoGrandpa, BridgeWococoGrandpa, BridgeWestendGrandpa, + BridgeWestendGrandpa, // Parachains - BridgeRococoParachains, BridgeWococoParachains, BridgeWestendParachains, + BridgeWestendParachains, // Messages - BridgeRococoMessages, BridgeWococoMessages, BridgeWestendMessages + BridgeWestendMessages } #[cfg(feature = "runtime-benchmarks")] @@ -600,15 +559,9 @@ mod benches { [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] // Bridge pallets - [pallet_bridge_grandpa, WococoFinality] [pallet_bridge_grandpa, WestendFinality] - [pallet_bridge_grandpa, RococoFinality] - [pallet_bridge_parachains, WithinWococo] [pallet_bridge_parachains, WithinWestend] - [pallet_bridge_parachains, WithinRococo] - [pallet_bridge_messages, RococoToWococo] [pallet_bridge_messages, RococoToWestend] - [pallet_bridge_messages, WococoToRococo] [pallet_bridge_relayers, BridgeRelayersBench::] ); } @@ -757,26 +710,6 @@ impl_runtime_apis! { } } - impl bp_rococo::RococoFinalityApi for Runtime { - fn best_finalized() -> Option> { - BridgeRococoGrandpa::best_finalized() - } - fn synced_headers_grandpa_info( - ) -> Vec> { - BridgeRococoGrandpa::synced_headers_grandpa_info() - } - } - - impl bp_wococo::WococoFinalityApi for Runtime { - fn best_finalized() -> Option> { - BridgeWococoGrandpa::best_finalized() - } - fn synced_headers_grandpa_info( - ) -> Vec> { - BridgeWococoGrandpa::synced_headers_grandpa_info() - } - } - impl bp_westend::WestendFinalityApi for Runtime { fn best_finalized() -> Option> { BridgeWestendGrandpa::best_finalized() @@ -787,22 +720,6 @@ impl_runtime_apis! { } } - impl bp_bridge_hub_rococo::BridgeHubRococoFinalityApi for Runtime { - fn best_finalized() -> Option> { - BridgeRococoParachains::best_parachain_head_id::< - bp_bridge_hub_rococo::BridgeHubRococo - >().unwrap_or(None) - } - } - - impl bp_bridge_hub_wococo::BridgeHubWococoFinalityApi for Runtime { - fn best_finalized() -> Option> { - BridgeWococoParachains::best_parachain_head_id::< - bp_bridge_hub_wococo::BridgeHubWococo - >().unwrap_or(None) - } - } - impl bp_bridge_hub_westend::BridgeHubWestendFinalityApi for Runtime { fn best_finalized() -> Option> { BridgeWestendParachains::best_parachain_head_id::< @@ -811,33 +728,6 @@ impl_runtime_apis! 
{ } } - // This is exposed by BridgeHubRococo - impl bp_bridge_hub_wococo::FromBridgeHubWococoInboundLaneApi for Runtime { - fn message_details( - lane: bp_messages::LaneId, - messages: Vec<(bp_messages::MessagePayload, bp_messages::OutboundMessageDetails)>, - ) -> Vec { - bridge_runtime_common::messages_api::inbound_message_details::< - Runtime, - bridge_to_wococo_config::WithBridgeHubWococoMessagesInstance, - >(lane, messages) - } - } - - // This is exposed by BridgeHubRococo - impl bp_bridge_hub_wococo::ToBridgeHubWococoOutboundLaneApi for Runtime { - fn message_details( - lane: bp_messages::LaneId, - begin: bp_messages::MessageNonce, - end: bp_messages::MessageNonce, - ) -> Vec { - bridge_runtime_common::messages_api::outbound_message_details::< - Runtime, - bridge_to_wococo_config::WithBridgeHubWococoMessagesInstance, - >(lane, begin, end) - } - } - // This is exposed by BridgeHubRococo impl bp_bridge_hub_westend::FromBridgeHubWestendInboundLaneApi for Runtime { fn message_details( @@ -865,45 +755,6 @@ impl_runtime_apis! { } } - // This is exposed by BridgeHubWococo - impl bp_bridge_hub_rococo::FromBridgeHubRococoInboundLaneApi for Runtime { - fn message_details( - lane: bp_messages::LaneId, - messages: Vec<(bp_messages::MessagePayload, bp_messages::OutboundMessageDetails)>, - ) -> Vec { - // use different instance according to flavor - match xcm_config::Flavor::get() { - RuntimeFlavor::Wococo => { - bridge_runtime_common::messages_api::inbound_message_details::< - Runtime, - bridge_to_rococo_config::WithBridgeHubRococoMessagesInstance, - >(lane, messages) - }, - flavor @ _ => unimplemented!("Unsupported `FromBridgeHubRococoInboundLaneApi` for flavor: {:?}", flavor) - } - } - } - - // This is exposed by BridgeHubWococo and BridgeHubWestend - impl bp_bridge_hub_rococo::ToBridgeHubRococoOutboundLaneApi for Runtime { - fn message_details( - lane: bp_messages::LaneId, - begin: bp_messages::MessageNonce, - end: bp_messages::MessageNonce, - ) -> Vec { - // use different instance according to flavor - match xcm_config::Flavor::get() { - RuntimeFlavor::Wococo => { - bridge_runtime_common::messages_api::outbound_message_details::< - Runtime, - bridge_to_rococo_config::WithBridgeHubRococoMessagesInstance, - >(lane, begin, end) - }, - flavor @ _ => unimplemented!("Unsupported `ToBridgeHubRococoOutboundLaneApi` for flavor: {:?}", flavor) - } - } - } - #[cfg(feature = "try-runtime")] impl frame_try_runtime::TryRuntime for Runtime { fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { @@ -943,15 +794,9 @@ impl_runtime_apis! { use pallet_bridge_relayers::benchmarking::Pallet as BridgeRelayersBench; // Change weight file names. - type WococoFinality = BridgeWococoGrandpa; type WestendFinality = BridgeWestendGrandpa; - type RococoFinality = BridgeRococoGrandpa; - type WithinWococo = pallet_bridge_parachains::benchmarking::Pallet::; type WithinWestend = pallet_bridge_parachains::benchmarking::Pallet::; - type WithinRococo = pallet_bridge_parachains::benchmarking::Pallet::; - type RococoToWococo = pallet_bridge_messages::benchmarking::Pallet ::; type RococoToWestend = pallet_bridge_messages::benchmarking::Pallet ::; - type WococoToRococo = pallet_bridge_messages::benchmarking::Pallet ::; let mut list = Vec::::new(); list_benchmarks!(list, extra); @@ -1098,7 +943,7 @@ impl_runtime_apis! 
{ fn export_message_origin_and_destination( ) -> Result<(MultiLocation, NetworkId, InteriorMultiLocation), BenchmarkError> { - Ok((TokenLocation::get(), NetworkId::Wococo, X1(Parachain(100)))) + Ok((TokenLocation::get(), NetworkId::Westend, X1(Parachain(100)))) } fn alias_origin() -> Result<(MultiLocation, MultiLocation), BenchmarkError> { @@ -1109,15 +954,9 @@ impl_runtime_apis! { type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::; - type WococoFinality = BridgeWococoGrandpa; type WestendFinality = BridgeWestendGrandpa; - type RococoFinality = BridgeRococoGrandpa; - type WithinWococo = pallet_bridge_parachains::benchmarking::Pallet::; type WithinWestend = pallet_bridge_parachains::benchmarking::Pallet::; - type WithinRococo = pallet_bridge_parachains::benchmarking::Pallet::; - type RococoToWococo = pallet_bridge_messages::benchmarking::Pallet ::; type RococoToWestend = pallet_bridge_messages::benchmarking::Pallet ::; - type WococoToRococo = pallet_bridge_messages::benchmarking::Pallet ::; use bridge_runtime_common::messages_benchmarking::{ prepare_message_delivery_proof_from_parachain, @@ -1130,49 +969,6 @@ impl_runtime_apis! { MessageProofParams, }; - impl BridgeMessagesConfig for Runtime { - fn is_relayer_rewarded(relayer: &Self::AccountId) -> bool { - let bench_lane_id = >::bench_lane_id(); - let bridged_chain_id = bp_runtime::BRIDGE_HUB_WOCOCO_CHAIN_ID; - pallet_bridge_relayers::Pallet::::relayer_reward( - relayer, - bp_relayers::RewardsAccountParams::new( - bench_lane_id, - bridged_chain_id, - bp_relayers::RewardsAccountOwner::BridgedChain - ) - ).is_some() - } - - fn prepare_message_proof( - params: MessageProofParams, - ) -> (bridge_to_wococo_config::FromWococoBridgeHubMessagesProof, Weight) { - use cumulus_primitives_core::XcmpMessageSource; - assert!(XcmpQueue::take_outbound_messages(usize::MAX).is_empty()); - ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests(42.into()); - prepare_message_proof_from_parachain::< - Runtime, - bridge_common_config::BridgeGrandpaWococoInstance, - bridge_to_wococo_config::WithBridgeHubWococoMessageBridge, - >(params, generate_xcm_builder_bridge_message_sample(X2(GlobalConsensus(Rococo), Parachain(42)))) - } - - fn prepare_message_delivery_proof( - params: MessageDeliveryProofParams, - ) -> bridge_to_wococo_config::ToWococoBridgeHubMessagesDeliveryProof { - prepare_message_delivery_proof_from_parachain::< - Runtime, - bridge_common_config::BridgeGrandpaWococoInstance, - bridge_to_wococo_config::WithBridgeHubWococoMessageBridge, - >(params) - } - - fn is_message_successfully_dispatched(_nonce: bp_messages::MessageNonce) -> bool { - use cumulus_primitives_core::XcmpMessageSource; - !XcmpQueue::take_outbound_messages(usize::MAX).is_empty() - } - } - impl BridgeMessagesConfig for Runtime { fn is_relayer_rewarded(relayer: &Self::AccountId) -> bool { let bench_lane_id = >::bench_lane_id(); @@ -1216,49 +1012,6 @@ impl_runtime_apis! 
{ } } - impl BridgeMessagesConfig for Runtime { - fn is_relayer_rewarded(relayer: &Self::AccountId) -> bool { - let bench_lane_id = >::bench_lane_id(); - let bridged_chain_id = bp_runtime::BRIDGE_HUB_ROCOCO_CHAIN_ID; - pallet_bridge_relayers::Pallet::::relayer_reward( - relayer, - bp_relayers::RewardsAccountParams::new( - bench_lane_id, - bridged_chain_id, - bp_relayers::RewardsAccountOwner::BridgedChain - ) - ).is_some() - } - - fn prepare_message_proof( - params: MessageProofParams, - ) -> (bridge_to_rococo_config::FromRococoBridgeHubMessagesProof, Weight) { - use cumulus_primitives_core::XcmpMessageSource; - assert!(XcmpQueue::take_outbound_messages(usize::MAX).is_empty()); - ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests(42.into()); - prepare_message_proof_from_parachain::< - Runtime, - bridge_common_config::BridgeGrandpaRococoInstance, - bridge_to_rococo_config::WithBridgeHubRococoMessageBridge, - >(params, generate_xcm_builder_bridge_message_sample(X2(GlobalConsensus(Wococo), Parachain(42)))) - } - - fn prepare_message_delivery_proof( - params: MessageDeliveryProofParams, - ) -> bridge_to_rococo_config::ToRococoBridgeHubMessagesDeliveryProof { - prepare_message_delivery_proof_from_parachain::< - Runtime, - bridge_common_config::BridgeGrandpaRococoInstance, - bridge_to_rococo_config::WithBridgeHubRococoMessageBridge, - >(params) - } - - fn is_message_successfully_dispatched(_nonce: bp_messages::MessageNonce) -> bool { - use cumulus_primitives_core::XcmpMessageSource; - !XcmpQueue::take_outbound_messages(usize::MAX).is_empty() - } - } - use bridge_runtime_common::parachains_benchmarking::prepare_parachain_heads_proof; use pallet_bridge_parachains::benchmarking::Config as BridgeParachainsConfig; use pallet_bridge_relayers::benchmarking::{ @@ -1266,30 +1019,6 @@ impl_runtime_apis! { Config as BridgeRelayersConfig, }; - impl BridgeParachainsConfig for Runtime { - fn parachains() -> Vec { - use bp_runtime::Parachain; - vec![bp_polkadot_core::parachains::ParaId(bp_bridge_hub_wococo::BridgeHubWococo::PARACHAIN_ID)] - } - - fn prepare_parachain_heads_proof( - parachains: &[bp_polkadot_core::parachains::ParaId], - parachain_head_size: u32, - proof_size: bp_runtime::StorageProofSize, - ) -> ( - pallet_bridge_parachains::RelayBlockNumber, - pallet_bridge_parachains::RelayBlockHash, - bp_polkadot_core::parachains::ParaHeadsProof, - Vec<(bp_polkadot_core::parachains::ParaId, bp_polkadot_core::parachains::ParaHash)>, - ) { - prepare_parachain_heads_proof::( - parachains, - parachain_head_size, - proof_size, - ) - } - } - impl BridgeParachainsConfig for Runtime { fn parachains() -> Vec { use bp_runtime::Parachain; @@ -1314,30 +1043,6 @@ impl_runtime_apis! 
{ } } - impl BridgeParachainsConfig for Runtime { - fn parachains() -> Vec { - use bp_runtime::Parachain; - vec![bp_polkadot_core::parachains::ParaId(bp_bridge_hub_rococo::BridgeHubRococo::PARACHAIN_ID)] - } - - fn prepare_parachain_heads_proof( - parachains: &[bp_polkadot_core::parachains::ParaId], - parachain_head_size: u32, - proof_size: bp_runtime::StorageProofSize, - ) -> ( - pallet_bridge_parachains::RelayBlockNumber, - pallet_bridge_parachains::RelayBlockHash, - bp_polkadot_core::parachains::ParaHeadsProof, - Vec<(bp_polkadot_core::parachains::ParaId, bp_polkadot_core::parachains::ParaHash)>, - ) { - prepare_parachain_heads_proof::( - parachains, - parachain_head_size, - proof_size, - ) - } - } - impl BridgeRelayersConfig for Runtime { fn prepare_rewards_account( account_params: bp_relayers::RewardsAccountParams, @@ -1418,11 +1123,7 @@ mod tests { frame_system::CheckWeight::new(), pallet_transaction_payment::ChargeTransactionPayment::from(10), BridgeRejectObsoleteHeadersAndMessages, - ( - bridge_to_wococo_config::OnBridgeHubRococoRefundBridgeHubWococoMessages::default(), - bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(), - bridge_to_rococo_config::OnBridgeHubWococoRefundBridgeHubRococoMessages::default(), - ), + (bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(),) ); // for BridgeHubRococo @@ -1442,24 +1143,6 @@ mod tests { bhr_indirect_payload.additional_signed().unwrap().encode() ) } - - // for BridgeHubWococo - { - let bhw_indirect_payload = bp_bridge_hub_wococo::SignedExtension::from_params( - VERSION.spec_version, - VERSION.transaction_version, - bp_runtime::TransactionEra::Immortal, - System::block_hash(BlockNumber::zero()), - 10, - 10, - (((), ()), ((), ())), - ); - assert_eq!(payload.encode(), bhw_indirect_payload.encode()); - assert_eq!( - payload.additional_signed().unwrap().encode(), - bhw_indirect_payload.additional_signed().unwrap().encode() - ) - } }); } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs index 66f8f1edf3c..a615f539547 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs @@ -17,6 +17,9 @@ //! Expose the auto generated weight files. 
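The re-exports and `WeightInfoExt` impls that follow consolidate the per-direction weight modules into single `pallet_bridge_grandpa` / `pallet_bridge_messages` / `pallet_bridge_parachains` modules. They rely on an extension-trait pattern: the autogenerated weight struct stays chain-agnostic, and a one-off trait impl supplies the bridged-chain-specific constant. A minimal, self-contained sketch of that pattern (trait, struct, and constant are illustrative, not the real crate items):

```rust
// Generic illustration of the "autogenerated weights + extension trait" pattern.
trait WeightInfoExt {
    fn expected_extra_storage_proof_size() -> u32;
}

// Stands in for the generated `WeightInfo<Runtime>` struct.
struct WeightInfo;

impl WeightInfoExt for WeightInfo {
    fn expected_extra_storage_proof_size() -> u32 {
        // Stands in for e.g. `bp_bridge_hub_westend::EXTRA_STORAGE_PROOF_SIZE`.
        4096
    }
}

fn main() {
    println!("{}", WeightInfo::expected_extra_storage_proof_size());
}
```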
+use ::pallet_bridge_messages::WeightInfoExt as MessagesWeightInfoExt; +use ::pallet_bridge_parachains::WeightInfoExt as ParachainsWeightInfoExt; + pub mod block_weights; pub mod cumulus_pallet_dmp_queue; pub mod cumulus_pallet_parachain_system; @@ -24,15 +27,9 @@ pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; pub mod pallet_balances; -pub mod pallet_bridge_grandpa_rococo_finality; -pub mod pallet_bridge_grandpa_westend_finality; -pub mod pallet_bridge_grandpa_wococo_finality; -pub mod pallet_bridge_messages_rococo_to_westend; -pub mod pallet_bridge_messages_rococo_to_wococo; -pub mod pallet_bridge_messages_wococo_to_rococo; -pub mod pallet_bridge_parachains_within_rococo; -pub mod pallet_bridge_parachains_within_westend; -pub mod pallet_bridge_parachains_within_wococo; +pub mod pallet_bridge_grandpa; +pub mod pallet_bridge_messages; +pub mod pallet_bridge_parachains; pub mod pallet_bridge_relayers; pub mod pallet_collator_selection; pub mod pallet_message_queue; @@ -56,43 +53,7 @@ use frame_support::weights::Weight; // import trait from dependency module use ::pallet_bridge_relayers::WeightInfoExt as _; -impl pallet_bridge_messages::WeightInfoExt - for pallet_bridge_messages_wococo_to_rococo::WeightInfo -{ - fn expected_extra_storage_proof_size() -> u32 { - bp_bridge_hub_rococo::EXTRA_STORAGE_PROOF_SIZE - } - - fn receive_messages_proof_overhead_from_runtime() -> Weight { - pallet_bridge_relayers::WeightInfo::::receive_messages_proof_overhead_from_runtime( - ) - } - - fn receive_messages_delivery_proof_overhead_from_runtime() -> Weight { - pallet_bridge_relayers::WeightInfo::::receive_messages_delivery_proof_overhead_from_runtime() - } -} - -impl pallet_bridge_messages::WeightInfoExt - for pallet_bridge_messages_rococo_to_wococo::WeightInfo -{ - fn expected_extra_storage_proof_size() -> u32 { - bp_bridge_hub_wococo::EXTRA_STORAGE_PROOF_SIZE - } - - fn receive_messages_proof_overhead_from_runtime() -> Weight { - pallet_bridge_relayers::WeightInfo::::receive_messages_proof_overhead_from_runtime( - ) - } - - fn receive_messages_delivery_proof_overhead_from_runtime() -> Weight { - pallet_bridge_relayers::WeightInfo::::receive_messages_delivery_proof_overhead_from_runtime() - } -} - -impl pallet_bridge_messages::WeightInfoExt - for pallet_bridge_messages_rococo_to_westend::WeightInfo -{ +impl MessagesWeightInfoExt for pallet_bridge_messages::WeightInfo { fn expected_extra_storage_proof_size() -> u32 { bp_bridge_hub_westend::EXTRA_STORAGE_PROOF_SIZE } @@ -107,26 +68,8 @@ impl pallet_bridge_messages::WeightInfoExt } } -impl pallet_bridge_parachains::WeightInfoExt - for pallet_bridge_parachains_within_rococo::WeightInfo -{ - fn expected_extra_storage_proof_size() -> u32 { - bp_bridge_hub_rococo::EXTRA_STORAGE_PROOF_SIZE - } -} - -impl pallet_bridge_parachains::WeightInfoExt - for pallet_bridge_parachains_within_westend::WeightInfo -{ +impl ParachainsWeightInfoExt for pallet_bridge_parachains::WeightInfo { fn expected_extra_storage_proof_size() -> u32 { bp_bridge_hub_westend::EXTRA_STORAGE_PROOF_SIZE } } - -impl pallet_bridge_parachains::WeightInfoExt - for pallet_bridge_parachains_within_wococo::WeightInfo -{ - fn expected_extra_storage_proof_size() -> u32 { - bp_bridge_hub_wococo::EXTRA_STORAGE_PROOF_SIZE - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs index 8ef05f17856..aaa6a3e0622 100644 --- 
a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs @@ -1,40 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_bridge_grandpa` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm4`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./artifacts/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=bridge-hub-rococo-dev -// --execution=wasm -// --wasm-execution=compiled -// --pallet=pallet_bridge_grandpa -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_bridge_grandpa +// --chain=bridge-hub-rococo-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,33 +48,31 @@ use core::marker::PhantomData; /// Weight functions for `pallet_bridge_grandpa`. 
pub struct WeightInfo<T>(PhantomData<T>); impl<T: frame_system::Config> pallet_bridge_grandpa::WeightInfo for WeightInfo<T> { - /// Storage: BridgeRococoGrandpa PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoGrandpa PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: BridgeRococoGrandpa BestFinalized (r:1 w:1) - /// Proof: BridgeRococoGrandpa BestFinalized (max_values: Some(1), max_size: Some(36), added: 531, mode: MaxEncodedLen) - /// Storage: BridgeRococoGrandpa CurrentAuthoritySet (r:1 w:0) - /// Proof: BridgeRococoGrandpa CurrentAuthoritySet (max_values: Some(1), max_size: Some(50250), added: 50745, mode: MaxEncodedLen) - /// Storage: BridgeRococoGrandpa ImportedHashesPointer (r:1 w:1) - /// Proof: BridgeRococoGrandpa ImportedHashesPointer (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: BridgeRococoGrandpa ImportedHashes (r:1 w:1) - /// Proof: BridgeRococoGrandpa ImportedHashes (max_values: Some(1024), max_size: Some(36), added: 1521, mode: MaxEncodedLen) - /// Storage: BridgeRococoGrandpa ImportedHeaders (r:0 w:2) - /// Proof: BridgeRococoGrandpa ImportedHeaders (max_values: Some(1024), max_size: Some(68), added: 1553, mode: MaxEncodedLen) - /// The range of component `p` is `[1, 838]`. - /// The range of component `v` is `[50, 100]`. + /// Storage: `BridgeWestendGrandpa::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendGrandpa::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::BestFinalized` (r:1 w:1) + /// Proof: `BridgeWestendGrandpa::BestFinalized` (`max_values`: Some(1), `max_size`: Some(36), added: 531, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::CurrentAuthoritySet` (r:1 w:0) + /// Proof: `BridgeWestendGrandpa::CurrentAuthoritySet` (`max_values`: Some(1), `max_size`: Some(50250), added: 50745, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::ImportedHashesPointer` (r:1 w:1) + /// Proof: `BridgeWestendGrandpa::ImportedHashesPointer` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::ImportedHashes` (r:1 w:1) + /// Proof: `BridgeWestendGrandpa::ImportedHashes` (`max_values`: Some(1024), `max_size`: Some(36), added: 1521, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:0 w:2) + /// Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 838]`. /// The range of component `v` is `[50, 100]`. fn submit_finality_proof(p: u32, v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `231 + p * (60 ±0)` + // Measured: `335 + p * (60 ±0)` // Estimated: `51735` - // Minimum execution time: 241_332_000 picoseconds. - Weight::from_parts(69_790_821, 0) + // Minimum execution time: 311_267_000 picoseconds.
+ Weight::from_parts(313_903_000, 0) .saturating_add(Weight::from_parts(0, 51735)) - // Standard Error: 6_013 - .saturating_add(Weight::from_parts(47_580_554, 0).saturating_mul(p.into())) - // Standard Error: 100_298 - .saturating_add(Weight::from_parts(1_213_475, 0).saturating_mul(v.into())) + // Standard Error: 4_779 + .saturating_add(Weight::from_parts(55_265_953, 0).saturating_mul(p.into())) + // Standard Error: 36_883 + .saturating_add(Weight::from_parts(153_660, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(5)) } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_rococo_finality.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_rococo_finality.rs deleted file mode 100644 index 0bb798bd9ec..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_rococo_finality.rs +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_bridge_grandpa` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_bridge_grandpa -// --chain=bridge-hub-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_bridge_grandpa`. 
-pub struct WeightInfo(PhantomData); -impl pallet_bridge_grandpa::WeightInfo for WeightInfo { - /// Storage: `BridgeRococoGrandpa::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoGrandpa::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoGrandpa::BestFinalized` (r:1 w:1) - /// Proof: `BridgeRococoGrandpa::BestFinalized` (`max_values`: Some(1), `max_size`: Some(36), added: 531, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoGrandpa::CurrentAuthoritySet` (r:1 w:0) - /// Proof: `BridgeRococoGrandpa::CurrentAuthoritySet` (`max_values`: Some(1), `max_size`: Some(50250), added: 50745, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoGrandpa::ImportedHashesPointer` (r:1 w:1) - /// Proof: `BridgeRococoGrandpa::ImportedHashesPointer` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoGrandpa::ImportedHashes` (r:1 w:1) - /// Proof: `BridgeRococoGrandpa::ImportedHashes` (`max_values`: Some(1024), `max_size`: Some(36), added: 1521, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoGrandpa::ImportedHeaders` (r:0 w:2) - /// Proof: `BridgeRococoGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 838]`. - /// The range of component `v` is `[50, 100]`. - /// The range of component `p` is `[1, 838]`. - /// The range of component `v` is `[50, 100]`. - /// The range of component `p` is `[1, 838]`. - /// The range of component `v` is `[50, 100]`. - fn submit_finality_proof(p: u32, v: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `268 + p * (60 ±0)` - // Estimated: `51735` - // Minimum execution time: 304_726_000 picoseconds. - Weight::from_parts(16_868_060, 0) - .saturating_add(Weight::from_parts(0, 51735)) - // Standard Error: 2_802 - .saturating_add(Weight::from_parts(55_200_017, 0).saturating_mul(p.into())) - // Standard Error: 46_745 - .saturating_add(Weight::from_parts(2_689_151, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_westend_finality.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_westend_finality.rs deleted file mode 100644 index 4ed140b7d17..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_westend_finality.rs +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `pallet_bridge_grandpa` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! 
DATE: 2023-10-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_bridge_grandpa -// --chain=bridge-hub-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_bridge_grandpa`. -pub struct WeightInfo(PhantomData); -impl pallet_bridge_grandpa::WeightInfo for WeightInfo { - /// Storage: `BridgeWestendGrandpa::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWestendGrandpa::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendGrandpa::BestFinalized` (r:1 w:1) - /// Proof: `BridgeWestendGrandpa::BestFinalized` (`max_values`: Some(1), `max_size`: Some(36), added: 531, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendGrandpa::CurrentAuthoritySet` (r:1 w:0) - /// Proof: `BridgeWestendGrandpa::CurrentAuthoritySet` (`max_values`: Some(1), `max_size`: Some(50250), added: 50745, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendGrandpa::ImportedHashesPointer` (r:1 w:1) - /// Proof: `BridgeWestendGrandpa::ImportedHashesPointer` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendGrandpa::ImportedHashes` (r:1 w:1) - /// Proof: `BridgeWestendGrandpa::ImportedHashes` (`max_values`: Some(1024), `max_size`: Some(36), added: 1521, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:0 w:2) - /// Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 838]`. - /// The range of component `v` is `[50, 100]`. - /// The range of component `p` is `[1, 838]`. - /// The range of component `v` is `[50, 100]`. - /// The range of component `p` is `[1, 838]`. - /// The range of component `v` is `[50, 100]`. - fn submit_finality_proof(p: u32, v: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `335 + p * (60 ±0)` - // Estimated: `51735` - // Minimum execution time: 305_905_000 picoseconds. 
- Weight::from_parts(2_636_863, 0) - .saturating_add(Weight::from_parts(0, 51735)) - // Standard Error: 2_724 - .saturating_add(Weight::from_parts(55_199_477, 0).saturating_mul(p.into())) - // Standard Error: 45_444 - .saturating_add(Weight::from_parts(2_835_596, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_wococo_finality.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_wococo_finality.rs deleted file mode 100644 index a82854e0c67..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_wococo_finality.rs +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_bridge_grandpa` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_bridge_grandpa -// --chain=bridge-hub-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_bridge_grandpa`. 
-pub struct WeightInfo(PhantomData); -impl pallet_bridge_grandpa::WeightInfo for WeightInfo { - /// Storage: `BridgeWococoGrandpa::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoGrandpa::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoGrandpa::BestFinalized` (r:1 w:1) - /// Proof: `BridgeWococoGrandpa::BestFinalized` (`max_values`: Some(1), `max_size`: Some(36), added: 531, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoGrandpa::CurrentAuthoritySet` (r:1 w:0) - /// Proof: `BridgeWococoGrandpa::CurrentAuthoritySet` (`max_values`: Some(1), `max_size`: Some(50250), added: 50745, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoGrandpa::ImportedHashesPointer` (r:1 w:1) - /// Proof: `BridgeWococoGrandpa::ImportedHashesPointer` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoGrandpa::ImportedHashes` (r:1 w:1) - /// Proof: `BridgeWococoGrandpa::ImportedHashes` (`max_values`: Some(1024), `max_size`: Some(36), added: 1521, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoGrandpa::ImportedHeaders` (r:0 w:2) - /// Proof: `BridgeWococoGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 838]`. - /// The range of component `v` is `[50, 100]`. - /// The range of component `p` is `[1, 838]`. - /// The range of component `v` is `[50, 100]`. - /// The range of component `p` is `[1, 838]`. - /// The range of component `v` is `[50, 100]`. - fn submit_finality_proof(p: u32, v: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `302 + p * (60 ±0)` - // Estimated: `51735` - // Minimum execution time: 305_146_000 picoseconds. - Weight::from_parts(308_711_000, 0) - .saturating_add(Weight::from_parts(0, 51735)) - // Standard Error: 2_651 - .saturating_add(Weight::from_parts(55_082_480, 0).saturating_mul(p.into())) - // Standard Error: 20_462 - .saturating_add(Weight::from_parts(298_367, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages.rs index 319a4de8e96..17a45df5bfb 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages.rs @@ -1,40 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see <https://www.gnu.org/licenses/>. //! Autogenerated weights for `pallet_bridge_messages` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm4`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./artifacts/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=bridge-hub-rococo-dev -// --execution=wasm -// --wasm-execution=compiled -// --pallet=pallet_bridge_messages -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages.rs +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_bridge_messages +// --chain=bridge-hub-rococo-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,184 +48,195 @@ use core::marker::PhantomData; /// Weight functions for `pallet_bridge_messages`.
pub struct WeightInfo<T>(PhantomData<T>); impl<T: frame_system::Config> pallet_bridge_messages::WeightInfo for WeightInfo<T> { - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages InboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages InboundLanes (max_values: None, max_size: Some(49180), added: 51655, mode: MaxEncodedLen) - /// Storage: ParachainInfo ParachainId (r:1 w:0) - /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn receive_single_message_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `367` + // Measured: `538` // Estimated: `52645` - // Minimum execution time: 43_187_000 picoseconds. - Weight::from_parts(43_681_000, 0) + // Minimum execution time: 41_577_000 picoseconds.
+ Weight::from_parts(42_621_000, 0) .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages InboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages InboundLanes (max_values: None, max_size: Some(49180), added: 51655, mode: MaxEncodedLen) - /// Storage: ParachainInfo ParachainId (r:1 w:0) - /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn receive_two_messages_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `367` + // Measured: `538` // Estimated: `52645` - // Minimum execution time: 54_131_000 picoseconds. - Weight::from_parts(54_813_000, 0) + // Minimum execution time: 52_880_000 picoseconds. 
+ Weight::from_parts(53_697_000, 0) .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages InboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages InboundLanes (max_values: None, max_size: Some(49180), added: 51655, mode: MaxEncodedLen) - /// Storage: ParachainInfo ParachainId (r:1 w:0) - /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn receive_single_message_proof_with_outbound_lane_state() -> Weight { // Proof Size summary in bytes: - // Measured: `367` + // Measured: `538` // Estimated: `52645` - // Minimum execution time: 48_120_000 picoseconds. - Weight::from_parts(48_733_000, 0) + // Minimum execution time: 47_424_000 picoseconds. 
+ Weight::from_parts(48_445_000, 0) .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages InboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages InboundLanes (max_values: None, max_size: Some(49180), added: 51655, mode: MaxEncodedLen) + /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) fn receive_single_message_proof_1_kb() -> Weight { // Proof Size summary in bytes: - // Measured: `335` + // Measured: `506` // Estimated: `52645` - // Minimum execution time: 41_028_000 picoseconds. - Weight::from_parts(41_635_000, 0) + // Minimum execution time: 40_619_000 picoseconds. 
+ Weight::from_parts(42_262_000, 0) .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages InboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages InboundLanes (max_values: None, max_size: Some(49180), added: 51655, mode: MaxEncodedLen) + /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) fn receive_single_message_proof_16_kb() -> Weight { // Proof Size summary in bytes: - // Measured: `335` + // Measured: `506` // Estimated: `52645` - // Minimum execution time: 68_499_000 picoseconds. - Weight::from_parts(69_263_000, 0) + // Minimum execution time: 74_603_000 picoseconds. 
+ Weight::from_parts(78_209_000, 0) .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages OutboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: 539, mode: MaxEncodedLen) - /// Storage: unknown `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof Skipped: unknown `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) - /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::OutboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) + /// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1) + /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_single_message() -> Weight { // Proof Size summary in bytes: - // Measured: `339` - // Estimated: `3804` - // Minimum execution time: 32_277_000 picoseconds. - Weight::from_parts(32_880_000, 0) - .saturating_add(Weight::from_parts(0, 3804)) + // Measured: `377` + // Estimated: `3842` + // Minimum execution time: 33_762_000 picoseconds. 
+ Weight::from_parts(34_405_000, 0) + .saturating_add(Weight::from_parts(0, 3842)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages OutboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: 539, mode: MaxEncodedLen) - /// Storage: unknown `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof Skipped: unknown `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) - /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::OutboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) + /// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1) + /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { // Proof Size summary in bytes: - // Measured: `339` - // Estimated: `3804` - // Minimum execution time: 32_504_000 picoseconds. - Weight::from_parts(33_085_000, 0) - .saturating_add(Weight::from_parts(0, 3804)) + // Measured: `377` + // Estimated: `3842` + // Minimum execution time: 33_805_000 picoseconds. 
+ Weight::from_parts(35_051_000, 0) + .saturating_add(Weight::from_parts(0, 3842)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages OutboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: 539, mode: MaxEncodedLen) - /// Storage: unknown `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof Skipped: unknown `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: BridgeRelayers RelayerRewards (r:2 w:2) - /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::OutboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) + /// Storage: `BridgeRelayers::RelayerRewards` (r:2 w:2) + /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { // Proof Size summary in bytes: - // Measured: `339` + // Measured: `377` // Estimated: `6086` - // Minimum execution time: 34_963_000 picoseconds. - Weight::from_parts(35_473_000, 0) + // Minimum execution time: 38_612_000 picoseconds. 
+ Weight::from_parts(39_412_000, 0) .saturating_add(Weight::from_parts(0, 6086)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages InboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages InboundLanes (max_values: None, max_size: Some(49180), added: 51655, mode: MaxEncodedLen) - /// Storage: ParachainInfo ParachainId (r:1 w:0) - /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem RelevantMessagingState (r:1 w:0) - /// Proof Skipped: ParachainSystem RelevantMessagingState (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: XcmpQueue OutboundXcmpStatus (r:1 w:1) - /// Proof Skipped: XcmpQueue OutboundXcmpStatus (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: XcmpQueue OutboundXcmpMessages (r:0 w:1) - /// Proof Skipped: XcmpQueue OutboundXcmpMessages (max_values: None, max_size: None, mode: Measured) - /// The range of component `i` is `[128, 2048]`. 
+ /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) + /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[128, 2048]`. fn receive_single_message_proof_with_dispatch(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `635` + // Measured: `669` // Estimated: `52645` - // Minimum execution time: 129_978_000 picoseconds. - Weight::from_parts(98_246_356, 0) + // Minimum execution time: 69_285_000 picoseconds. + Weight::from_parts(70_867_498, 0) .saturating_add(Weight::from_parts(0, 52645)) - // Standard Error: 2_554 - .saturating_add(Weight::from_parts(544_728, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads(9)) + // Standard Error: 111 + .saturating_add(Weight::from_parts(7_489, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_westend.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_westend.rs deleted file mode 100644 index 6513b63474a..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_westend.rs +++ /dev/null @@ -1,245 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `pallet_bridge_messages` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_bridge_messages -// --chain=bridge-hub-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_bridge_messages`. -pub struct WeightInfo(PhantomData); -impl pallet_bridge_messages::WeightInfo for WeightInfo { - /// Storage: `BridgeRococoToWestendMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWestendMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn receive_single_message_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `643` - // Estimated: `52645` - // Minimum execution time: 41_873_000 picoseconds. 
- Weight::from_parts(43_434_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeRococoToWestendMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWestendMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn receive_two_messages_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `643` - // Estimated: `52645` - // Minimum execution time: 53_328_000 picoseconds. - Weight::from_parts(54_592_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeRococoToWestendMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWestendMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn receive_single_message_proof_with_outbound_lane_state() -> Weight { - // Proof Size summary in bytes: - // Measured: `643` - // Estimated: `52645` - // Minimum execution time: 47_486_000 picoseconds. 
- Weight::from_parts(48_721_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeRococoToWestendMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWestendMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - fn receive_single_message_proof_1_kb() -> Weight { - // Proof Size summary in bytes: - // Measured: `611` - // Estimated: `52645` - // Minimum execution time: 41_093_000 picoseconds. - Weight::from_parts(42_050_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeRococoToWestendMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWestendMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - fn receive_single_message_proof_16_kb() -> Weight { - // Proof Size summary in bytes: - // Measured: `611` - // Estimated: `52645` - // Minimum execution time: 71_947_000 picoseconds. 
- Weight::from_parts(74_564_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeRococoToWestendMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWestendMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWestendMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1) - /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn receive_delivery_proof_for_single_message() -> Weight { - // Proof Size summary in bytes: - // Measured: `482` - // Estimated: `3947` - // Minimum execution time: 31_235_000 picoseconds. - Weight::from_parts(32_051_000, 0) - .saturating_add(Weight::from_parts(0, 3947)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `BridgeRococoToWestendMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWestendMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWestendMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1) - /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { - // Proof Size summary in bytes: - // Measured: `482` - // Estimated: `3947` - // Minimum execution time: 31_320_000 picoseconds. 
- Weight::from_parts(31_973_000, 0) - .saturating_add(Weight::from_parts(0, 3947)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `BridgeRococoToWestendMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWestendMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWestendMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: `BridgeRelayers::RelayerRewards` (r:2 w:2) - /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { - // Proof Size summary in bytes: - // Measured: `482` - // Estimated: `6086` - // Minimum execution time: 33_656_000 picoseconds. - Weight::from_parts(34_779_000, 0) - .saturating_add(Weight::from_parts(0, 6086)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `BridgeRococoToWestendMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWestendMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) - /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) - /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `i` 
is `[128, 2048]`. - /// The range of component `i` is `[128, 2048]`. - /// The range of component `i` is `[128, 2048]`. - /// The range of component `i` is `[128, 2048]`. - fn receive_single_message_proof_with_dispatch(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `772` - // Estimated: `52645` - // Minimum execution time: 61_671_000 picoseconds. - Weight::from_parts(62_656_321, 0) - .saturating_add(Weight::from_parts(0, 52645)) - // Standard Error: 25 - .saturating_add(Weight::from_parts(6_641, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(4)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_wococo.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_wococo.rs deleted file mode 100644 index e2f58cdfad5..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_wococo.rs +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_bridge_messages` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_bridge_messages -// --chain=bridge-hub-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_bridge_messages`. 
-pub struct WeightInfo(PhantomData); -impl pallet_bridge_messages::WeightInfo for WeightInfo { - /// Storage: `BridgeRococoToWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn receive_single_message_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `609` - // Estimated: `52645` - // Minimum execution time: 42_407_000 picoseconds. - Weight::from_parts(43_917_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeRococoToWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn receive_two_messages_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `609` - // Estimated: `52645` - // Minimum execution time: 53_258_000 picoseconds. 
- Weight::from_parts(55_144_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeRococoToWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn receive_single_message_proof_with_outbound_lane_state() -> Weight { - // Proof Size summary in bytes: - // Measured: `609` - // Estimated: `52645` - // Minimum execution time: 47_950_000 picoseconds. - Weight::from_parts(49_315_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeRococoToWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - fn receive_single_message_proof_1_kb() -> Weight { - // Proof Size summary in bytes: - // Measured: `577` - // Estimated: `52645` - // Minimum execution time: 41_383_000 picoseconds. 
- Weight::from_parts(42_898_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeRococoToWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - fn receive_single_message_proof_16_kb() -> Weight { - // Proof Size summary in bytes: - // Measured: `577` - // Estimated: `52645` - // Minimum execution time: 72_118_000 picoseconds. - Weight::from_parts(74_643_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeRococoToWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWococoMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1) - /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn receive_delivery_proof_for_single_message() -> Weight { - // Proof Size summary in bytes: - // Measured: `448` - // Estimated: `3913` - // Minimum execution time: 30_993_000 picoseconds. 
- Weight::from_parts(31_793_000, 0) - .saturating_add(Weight::from_parts(0, 3913)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `BridgeRococoToWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWococoMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1) - /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { - // Proof Size summary in bytes: - // Measured: `448` - // Estimated: `3913` - // Minimum execution time: 30_894_000 picoseconds. - Weight::from_parts(31_925_000, 0) - .saturating_add(Weight::from_parts(0, 3913)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `BridgeRococoToWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWococoMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: `BridgeRelayers::RelayerRewards` (r:2 w:2) - /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { - // Proof Size summary in bytes: - // Measured: `448` - // Estimated: `6086` - // Minimum execution time: 33_804_000 picoseconds. 
- Weight::from_parts(34_560_000, 0) - .saturating_add(Weight::from_parts(0, 6086)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `BridgeRococoToWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) - /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) - /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `i` is `[128, 2048]`. - /// The range of component `i` is `[128, 2048]`. - /// The range of component `i` is `[128, 2048]`. - /// The range of component `i` is `[128, 2048]`. - fn receive_single_message_proof_with_dispatch(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `811` - // Estimated: `52645` - // Minimum execution time: 62_616_000 picoseconds. - Weight::from_parts(64_073_891, 0) - .saturating_add(Weight::from_parts(0, 52645)) - // Standard Error: 43 - .saturating_add(Weight::from_parts(6_525, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(4)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_wococo_to_rococo.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_wococo_to_rococo.rs deleted file mode 100644 index d9c0fd15468..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_wococo_to_rococo.rs +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_bridge_messages` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_bridge_messages -// --chain=bridge-hub-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_bridge_messages`. -pub struct WeightInfo(PhantomData); -impl pallet_bridge_messages::WeightInfo for WeightInfo { - /// Storage: `BridgeWococoToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoToRococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoToRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn receive_single_message_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `537` - // Estimated: `52645` - // Minimum execution time: 42_086_000 picoseconds. 
- Weight::from_parts(42_833_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeWococoToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoToRococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoToRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn receive_two_messages_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `537` - // Estimated: `52645` - // Minimum execution time: 51_927_000 picoseconds. - Weight::from_parts(53_847_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeWococoToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoToRococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoToRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn receive_single_message_proof_with_outbound_lane_state() -> Weight { - // Proof Size summary in bytes: - // Measured: `537` - // Estimated: `52645` - // Minimum execution time: 47_218_000 picoseconds. 
- Weight::from_parts(48_380_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeWococoToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoToRococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoToRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - fn receive_single_message_proof_1_kb() -> Weight { - // Proof Size summary in bytes: - // Measured: `505` - // Estimated: `52645` - // Minimum execution time: 40_585_000 picoseconds. - Weight::from_parts(41_714_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeWococoToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoToRococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoToRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - fn receive_single_message_proof_16_kb() -> Weight { - // Proof Size summary in bytes: - // Measured: `505` - // Estimated: `52645` - // Minimum execution time: 71_197_000 picoseconds. 
- Weight::from_parts(73_983_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeWococoToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoToRococoMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoToRococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1) - /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn receive_delivery_proof_for_single_message() -> Weight { - // Proof Size summary in bytes: - // Measured: `376` - // Estimated: `3841` - // Minimum execution time: 30_823_000 picoseconds. - Weight::from_parts(31_501_000, 0) - .saturating_add(Weight::from_parts(0, 3841)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `BridgeWococoToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoToRococoMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoToRococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1) - /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { - // Proof Size summary in bytes: - // Measured: `376` - // Estimated: `3841` - // Minimum execution time: 30_854_000 picoseconds. 
- Weight::from_parts(31_663_000, 0) - .saturating_add(Weight::from_parts(0, 3841)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `BridgeWococoToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoToRococoMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoToRococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: `BridgeRelayers::RelayerRewards` (r:2 w:2) - /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { - // Proof Size summary in bytes: - // Measured: `376` - // Estimated: `6086` - // Minimum execution time: 33_463_000 picoseconds. - Weight::from_parts(34_290_000, 0) - .saturating_add(Weight::from_parts(0, 6086)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `BridgeWococoToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoToRococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoToRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) - /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) - /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `i` is `[128, 
2048]`. - /// The range of component `i` is `[128, 2048]`. - /// The range of component `i` is `[128, 2048]`. - /// The range of component `i` is `[128, 2048]`. - fn receive_single_message_proof_with_dispatch(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `739` - // Estimated: `52645` - // Minimum execution time: 61_523_000 picoseconds. - Weight::from_parts(62_686_055, 0) - .saturating_add(Weight::from_parts(0, 52645)) - // Standard Error: 26 - .saturating_add(Weight::from_parts(6_563, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(4)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains.rs index bd7384a05fe..5c7c4a63682 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains.rs @@ -1,40 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_bridge_parachains` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm4`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./artifacts/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=bridge-hub-rococo-dev -// --execution=wasm -// --wasm-execution=compiled -// --pallet=pallet_bridge_parachains -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains.rs +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_bridge_parachains +// --chain=bridge-hub-rococo-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,64 +48,63 @@ use core::marker::PhantomData; /// Weight functions for `pallet_bridge_parachains`. pub struct WeightInfo(PhantomData); impl pallet_bridge_parachains::WeightInfo for WeightInfo { - /// Storage: BridgeWococoParachain PalletOperatingMode (r:1 w:0) - /// Proof: BridgeWococoParachain PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: BridgeWococoGrandpa ImportedHeaders (r:1 w:0) - /// Proof: BridgeWococoGrandpa ImportedHeaders (max_values: Some(1024), max_size: Some(68), added: 1553, mode: MaxEncodedLen) - /// Storage: BridgeWococoParachain ParasInfo (r:1 w:1) - /// Proof: BridgeWococoParachain ParasInfo (max_values: Some(1), max_size: Some(60), added: 555, mode: MaxEncodedLen) - /// Storage: BridgeWococoParachain ImportedParaHashes (r:1 w:1) - /// Proof: BridgeWococoParachain ImportedParaHashes (max_values: Some(64), max_size: Some(64), added: 1054, mode: MaxEncodedLen) - /// Storage: BridgeWococoParachain ImportedParaHeads (r:0 w:1) - /// Proof: BridgeWococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// The range of component `p` is `[1, 2]`. + /// Storage: `BridgeWestendParachains::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendParachains::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:1 w:0) + /// Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ParasInfo` (r:1 w:1) + /// Proof: `BridgeWestendParachains::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHashes` (r:1 w:1) + /// Proof: `BridgeWestendParachains::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:0 w:1) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 2]`. fn submit_parachain_heads_with_n_parachains(_p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `367` + // Measured: `434` // Estimated: `2543` - // Minimum execution time: 34_759_000 picoseconds. - Weight::from_parts(35_709_034, 0) + // Minimum execution time: 31_987_000 picoseconds. 
+ Weight::from_parts(33_060_534, 0) .saturating_add(Weight::from_parts(0, 2543)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: BridgeWococoParachain PalletOperatingMode (r:1 w:0) - /// Proof: BridgeWococoParachain PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: BridgeWococoGrandpa ImportedHeaders (r:1 w:0) - /// Proof: BridgeWococoGrandpa ImportedHeaders (max_values: Some(1024), max_size: Some(68), added: 1553, mode: MaxEncodedLen) - /// Storage: BridgeWococoParachain ParasInfo (r:1 w:1) - /// Proof: BridgeWococoParachain ParasInfo (max_values: Some(1), max_size: Some(60), added: 555, mode: MaxEncodedLen) - /// Storage: BridgeWococoParachain ImportedParaHashes (r:1 w:1) - /// Proof: BridgeWococoParachain ImportedParaHashes (max_values: Some(64), max_size: Some(64), added: 1054, mode: MaxEncodedLen) - /// Storage: BridgeWococoParachain ImportedParaHeads (r:0 w:1) - /// Proof: BridgeWococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) + /// Storage: `BridgeWestendParachains::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendParachains::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:1 w:0) + /// Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ParasInfo` (r:1 w:1) + /// Proof: `BridgeWestendParachains::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHashes` (r:1 w:1) + /// Proof: `BridgeWestendParachains::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:0 w:1) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) fn submit_parachain_heads_with_1kb_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `367` + // Measured: `434` // Estimated: `2543` - // Minimum execution time: 36_005_000 picoseconds. - Weight::from_parts(36_492_000, 0) + // Minimum execution time: 33_360_000 picoseconds. 
+ Weight::from_parts(34_182_000, 0) .saturating_add(Weight::from_parts(0, 2543)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: BridgeWococoParachain PalletOperatingMode (r:1 w:0) - /// Proof: BridgeWococoParachain PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: BridgeWococoGrandpa ImportedHeaders (r:1 w:0) - /// Proof: BridgeWococoGrandpa ImportedHeaders (max_values: Some(1024), max_size: Some(68), added: 1553, mode: MaxEncodedLen) - /// Storage: BridgeWococoParachain ParasInfo (r:1 w:1) - /// Proof: BridgeWococoParachain ParasInfo (max_values: Some(1), max_size: Some(60), added: 555, mode: MaxEncodedLen) - /// Storage: BridgeWococoParachain ImportedParaHashes (r:1 w:1) - /// Proof: BridgeWococoParachain ImportedParaHashes (max_values: Some(64), max_size: Some(64), added: 1054, mode: MaxEncodedLen) - /// Storage: BridgeWococoParachain ImportedParaHeads (r:0 w:1) - /// Proof: BridgeWococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) + /// Storage: `BridgeWestendParachains::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendParachains::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:1 w:0) + /// Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ParasInfo` (r:1 w:1) + /// Proof: `BridgeWestendParachains::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHashes` (r:1 w:1) + /// Proof: `BridgeWestendParachains::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:0 w:1) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) fn submit_parachain_heads_with_16kb_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `367` + // Measured: `434` // Estimated: `2543` - // Minimum execution time: 62_374_000 picoseconds. - Weight::from_parts(62_977_000, 0) + // Minimum execution time: 65_246_000 picoseconds. + Weight::from_parts(65_985_000, 0) .saturating_add(Weight::from_parts(0, 2543)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_within_rococo.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_within_rococo.rs deleted file mode 100644 index e36bbcca42e..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_within_rococo.rs +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_bridge_parachains` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_bridge_parachains -// --chain=bridge-hub-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_bridge_parachains`. -pub struct WeightInfo(PhantomData); -impl pallet_bridge_parachains::WeightInfo for WeightInfo { - /// Storage: `BridgeRococoParachain::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoParachain::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoGrandpa::ImportedHeaders` (r:1 w:0) - /// Proof: `BridgeRococoGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ParasInfo` (r:1 w:1) - /// Proof: `BridgeRococoParachain::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ImportedParaHashes` (r:1 w:1) - /// Proof: `BridgeRococoParachain::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:0 w:1) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 2]`. - /// The range of component `p` is `[1, 2]`. - /// The range of component `p` is `[1, 2]`. - fn submit_parachain_heads_with_n_parachains(_p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `367` - // Estimated: `2543` - // Minimum execution time: 31_241_000 picoseconds. 
- Weight::from_parts(32_488_584, 0) - .saturating_add(Weight::from_parts(0, 2543)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `BridgeRococoParachain::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoParachain::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoGrandpa::ImportedHeaders` (r:1 w:0) - /// Proof: `BridgeRococoGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ParasInfo` (r:1 w:1) - /// Proof: `BridgeRococoParachain::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ImportedParaHashes` (r:1 w:1) - /// Proof: `BridgeRococoParachain::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:0 w:1) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - fn submit_parachain_heads_with_1kb_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `367` - // Estimated: `2543` - // Minimum execution time: 32_962_000 picoseconds. - Weight::from_parts(33_658_000, 0) - .saturating_add(Weight::from_parts(0, 2543)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `BridgeRococoParachain::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoParachain::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoGrandpa::ImportedHeaders` (r:1 w:0) - /// Proof: `BridgeRococoGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ParasInfo` (r:1 w:1) - /// Proof: `BridgeRococoParachain::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ImportedParaHashes` (r:1 w:1) - /// Proof: `BridgeRococoParachain::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:0 w:1) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - fn submit_parachain_heads_with_16kb_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `367` - // Estimated: `2543` - // Minimum execution time: 62_685_000 picoseconds. - Weight::from_parts(64_589_000, 0) - .saturating_add(Weight::from_parts(0, 2543)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_within_westend.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_within_westend.rs deleted file mode 100644 index bfe93b4c36a..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_within_westend.rs +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. 
- -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `pallet_bridge_parachains` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_bridge_parachains -// --chain=bridge-hub-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_bridge_parachains`. -pub struct WeightInfo(PhantomData); -impl pallet_bridge_parachains::WeightInfo for WeightInfo { - /// Storage: `BridgeWestendParachain::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWestendParachain::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:1 w:0) - /// Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ParasInfo` (r:1 w:1) - /// Proof: `BridgeWestendParachain::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ImportedParaHashes` (r:1 w:1) - /// Proof: `BridgeWestendParachain::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:0 w:1) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 2]`. - /// The range of component `p` is `[1, 2]`. - /// The range of component `p` is `[1, 2]`. - fn submit_parachain_heads_with_n_parachains(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `468` - // Estimated: `2543` - // Minimum execution time: 31_493_000 picoseconds. 
- Weight::from_parts(32_511_270, 0) - .saturating_add(Weight::from_parts(0, 2543)) - // Standard Error: 33_650 - .saturating_add(Weight::from_parts(20_764, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `BridgeWestendParachain::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWestendParachain::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:1 w:0) - /// Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ParasInfo` (r:1 w:1) - /// Proof: `BridgeWestendParachain::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ImportedParaHashes` (r:1 w:1) - /// Proof: `BridgeWestendParachain::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:0 w:1) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - fn submit_parachain_heads_with_1kb_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `468` - // Estimated: `2543` - // Minimum execution time: 32_976_000 picoseconds. - Weight::from_parts(33_647_000, 0) - .saturating_add(Weight::from_parts(0, 2543)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `BridgeWestendParachain::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWestendParachain::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:1 w:0) - /// Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ParasInfo` (r:1 w:1) - /// Proof: `BridgeWestendParachain::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ImportedParaHashes` (r:1 w:1) - /// Proof: `BridgeWestendParachain::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:0 w:1) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - fn submit_parachain_heads_with_16kb_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `468` - // Estimated: `2543` - // Minimum execution time: 62_898_000 picoseconds. 
- Weight::from_parts(64_463_000, 0) - .saturating_add(Weight::from_parts(0, 2543)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_within_wococo.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_within_wococo.rs deleted file mode 100644 index d685daf930f..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_within_wococo.rs +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_bridge_parachains` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_bridge_parachains -// --chain=bridge-hub-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_bridge_parachains`. 
-pub struct WeightInfo(PhantomData); -impl pallet_bridge_parachains::WeightInfo for WeightInfo { - /// Storage: `BridgeWococoParachain::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoParachain::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoGrandpa::ImportedHeaders` (r:1 w:0) - /// Proof: `BridgeWococoGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ParasInfo` (r:1 w:1) - /// Proof: `BridgeWococoParachain::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHashes` (r:1 w:1) - /// Proof: `BridgeWococoParachain::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:0 w:1) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 2]`. - /// The range of component `p` is `[1, 2]`. - /// The range of component `p` is `[1, 2]`. - fn submit_parachain_heads_with_n_parachains(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `435` - // Estimated: `2543` - // Minimum execution time: 31_573_000 picoseconds. - Weight::from_parts(32_739_400, 0) - .saturating_add(Weight::from_parts(0, 2543)) - // Standard Error: 49_518 - .saturating_add(Weight::from_parts(5_166, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `BridgeWococoParachain::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoParachain::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoGrandpa::ImportedHeaders` (r:1 w:0) - /// Proof: `BridgeWococoGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ParasInfo` (r:1 w:1) - /// Proof: `BridgeWococoParachain::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHashes` (r:1 w:1) - /// Proof: `BridgeWococoParachain::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:0 w:1) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - fn submit_parachain_heads_with_1kb_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `435` - // Estimated: `2543` - // Minimum execution time: 32_780_000 picoseconds. 
- Weight::from_parts(33_797_000, 0) - .saturating_add(Weight::from_parts(0, 2543)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `BridgeWococoParachain::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoParachain::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoGrandpa::ImportedHeaders` (r:1 w:0) - /// Proof: `BridgeWococoGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ParasInfo` (r:1 w:1) - /// Proof: `BridgeWococoParachain::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHashes` (r:1 w:1) - /// Proof: `BridgeWococoParachain::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:0 w:1) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - fn submit_parachain_heads_with_16kb_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `435` - // Estimated: `2543` - // Minimum execution time: 62_847_000 picoseconds. - Weight::from_parts(63_991_000, 0) - .saturating_add(Weight::from_parts(0, 2543)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_relayers.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_relayers.rs index 48f0c1f949b..70af694645d 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_relayers.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_relayers.rs @@ -1,24 +1,25 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_bridge_relayers` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! 
DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -55,8 +56,8 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< // Proof Size summary in bytes: // Measured: `207` // Estimated: `3593` - // Minimum execution time: 45_338_000 picoseconds. - Weight::from_parts(45_836_000, 0) + // Minimum execution time: 46_579_000 picoseconds. + Weight::from_parts(48_298_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -71,8 +72,8 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< // Proof Size summary in bytes: // Measured: `61` // Estimated: `4714` - // Minimum execution time: 23_561_000 picoseconds. - Weight::from_parts(24_012_000, 0) + // Minimum execution time: 24_219_000 picoseconds. + Weight::from_parts(24_993_000, 0) .saturating_add(Weight::from_parts(0, 4714)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -85,8 +86,8 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< // Proof Size summary in bytes: // Measured: `160` // Estimated: `4714` - // Minimum execution time: 25_133_000 picoseconds. - Weight::from_parts(25_728_000, 0) + // Minimum execution time: 26_279_000 picoseconds. + Weight::from_parts(26_810_000, 0) .saturating_add(Weight::from_parts(0, 4714)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -101,8 +102,8 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< // Proof Size summary in bytes: // Measured: `263` // Estimated: `4714` - // Minimum execution time: 27_356_000 picoseconds. - Weight::from_parts(27_828_000, 0) + // Minimum execution time: 27_672_000 picoseconds. + Weight::from_parts(28_946_000, 0) .saturating_add(Weight::from_parts(0, 4714)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) @@ -113,8 +114,8 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< // Proof Size summary in bytes: // Measured: `6` // Estimated: `3538` - // Minimum execution time: 2_955_000 picoseconds. - Weight::from_parts(3_084_000, 0) + // Minimum execution time: 5_487_000 picoseconds. + Weight::from_parts(5_725_000, 0) .saturating_add(Weight::from_parts(0, 3538)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index cb7ad7a7803..d7e8c41ff8a 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -1,24 +1,25 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. 
-// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_xcm_benchmarks::fungible` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 // Executed Command: @@ -53,8 +54,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 19_037_000 picoseconds. - Weight::from_parts(19_602_000, 3593) + // Minimum execution time: 19_610_000 picoseconds. + Weight::from_parts(19_980_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -64,15 +65,13 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `153` // Estimated: `6196` - // Minimum execution time: 43_115_000 picoseconds. - Weight::from_parts(43_897_000, 6196) + // Minimum execution time: 44_411_000 picoseconds. + Weight::from_parts(45_110_000, 6196) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } // Storage: `System::Account` (r:3 w:3) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -89,11 +88,11 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn transfer_reserve_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `294` + // Measured: `223` // Estimated: `8799` - // Minimum execution time: 90_267_000 picoseconds. 
- Weight::from_parts(91_460_000, 8799) - .saturating_add(T::DbWeight::get().reads(11)) + // Minimum execution time: 89_739_000 picoseconds. + Weight::from_parts(91_256_000, 8799) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(5)) } // Storage: `Benchmark::Override` (r:0 w:0) @@ -105,8 +104,6 @@ impl WeightInfo { // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. Weight::from_parts(18_446_744_073_709_551_000, 0) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -125,19 +122,19 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn initiate_reserve_withdraw() -> Weight { // Proof Size summary in bytes: - // Measured: `242` + // Measured: `171` // Estimated: `6196` - // Minimum execution time: 60_477_000 picoseconds. - Weight::from_parts(61_314_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 60_045_000 picoseconds. + Weight::from_parts(60_710_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } pub fn receive_teleported_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_996_000 picoseconds. - Weight::from_parts(3_107_000, 0) + // Minimum execution time: 3_257_000 picoseconds. + Weight::from_parts(3_392_000, 0) } // Storage: `System::Account` (r:1 w:1) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) @@ -145,15 +142,13 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `52` // Estimated: `3593` - // Minimum execution time: 18_907_000 picoseconds. - Weight::from_parts(19_475_000, 3593) + // Minimum execution time: 19_423_000 picoseconds. + Weight::from_parts(19_823_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } // Storage: `System::Account` (r:2 w:2) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -170,15 +165,13 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn deposit_reserve_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `193` + // Measured: `122` // Estimated: `6196` - // Minimum execution time: 59_143_000 picoseconds. - Weight::from_parts(60_316_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 60_484_000 picoseconds. 
+ Weight::from_parts(61_634_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -197,11 +190,11 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn initiate_teleport() -> Weight { // Proof Size summary in bytes: - // Measured: `141` - // Estimated: `3606` - // Minimum execution time: 44_459_000 picoseconds. - Weight::from_parts(45_365_000, 3606) - .saturating_add(T::DbWeight::get().reads(9)) + // Measured: `70` + // Estimated: `3593` + // Minimum execution time: 44_863_000 picoseconds. + Weight::from_parts(45_549_000, 3593) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 4eee8f0e613..0ae6d0b5623 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -1,24 +1,25 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 // Executed Command: @@ -47,8 +48,6 @@ use sp_std::marker::PhantomData; /// Weights for `pallet_xcm_benchmarks::generic`. pub struct WeightInfo(PhantomData); impl WeightInfo { - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -67,81 +66,79 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_holding() -> Weight { // Proof Size summary in bytes: - // Measured: `242` + // Measured: `171` // Estimated: `6196` - // Minimum execution time: 62_732_000 picoseconds. - Weight::from_parts(64_581_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 63_453_000 picoseconds. + Weight::from_parts(64_220_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } pub fn buy_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_987_000 picoseconds. - Weight::from_parts(2_107_000, 0) + // Minimum execution time: 2_238_000 picoseconds. + Weight::from_parts(2_351_000, 0) } // Storage: `PolkadotXcm::Queries` (r:1 w:0) // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) pub fn query_response() -> Weight { // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `3568` - // Minimum execution time: 8_098_000 picoseconds. - Weight::from_parts(8_564_000, 3568) + // Measured: `32` + // Estimated: `3497` + // Minimum execution time: 7_953_000 picoseconds. + Weight::from_parts(8_162_000, 3497) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_539_000 picoseconds. - Weight::from_parts(9_085_000, 0) + // Minimum execution time: 9_080_000 picoseconds. + Weight::from_parts(9_333_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_205_000 picoseconds. - Weight::from_parts(2_369_000, 0) + // Minimum execution time: 2_415_000 picoseconds. + Weight::from_parts(2_519_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_828_000 picoseconds. - Weight::from_parts(1_994_000, 0) + // Minimum execution time: 2_045_000 picoseconds. + Weight::from_parts(2_184_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_869_000 picoseconds. - Weight::from_parts(1_946_000, 0) + // Minimum execution time: 2_065_000 picoseconds. + Weight::from_parts(2_125_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_842_000 picoseconds. - Weight::from_parts(1_949_000, 0) + // Minimum execution time: 2_077_000 picoseconds. 
+ Weight::from_parts(2_164_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_460_000 picoseconds. - Weight::from_parts(2_593_000, 0) + // Minimum execution time: 2_868_000 picoseconds. + Weight::from_parts(2_933_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_868_000 picoseconds. - Weight::from_parts(2_003_000, 0) + // Minimum execution time: 2_058_000 picoseconds. + Weight::from_parts(2_164_000, 0) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -160,21 +157,21 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_error() -> Weight { // Proof Size summary in bytes: - // Measured: `242` + // Measured: `171` // Estimated: `6196` - // Minimum execution time: 56_813_000 picoseconds. - Weight::from_parts(57_728_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 55_971_000 picoseconds. + Weight::from_parts(56_869_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } // Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) // Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) pub fn claim_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `160` - // Estimated: `3625` - // Minimum execution time: 11_364_000 picoseconds. - Weight::from_parts(11_872_000, 3625) + // Measured: `90` + // Estimated: `3555` + // Minimum execution time: 11_382_000 picoseconds. + Weight::from_parts(11_672_000, 3555) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -182,8 +179,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_821_000 picoseconds. - Weight::from_parts(1_936_000, 0) + // Minimum execution time: 2_071_000 picoseconds. + Weight::from_parts(2_193_000, 0) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -201,10 +198,10 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn subscribe_version() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 23_081_000 picoseconds. - Weight::from_parts(23_512_000, 3574) + // Measured: `38` + // Estimated: `3503` + // Minimum execution time: 22_573_000 picoseconds. + Weight::from_parts(23_423_000, 3503) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -214,47 +211,45 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_747_000 picoseconds. - Weight::from_parts(4_068_000, 0) + // Minimum execution time: 3_870_000 picoseconds. 
+ Weight::from_parts(3_993_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_045_000 picoseconds. - Weight::from_parts(3_208_000, 0) + // Minimum execution time: 3_483_000 picoseconds. + Weight::from_parts(3_598_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_962_000 picoseconds. - Weight::from_parts(2_284_000, 0) + // Minimum execution time: 2_241_000 picoseconds. + Weight::from_parts(2_297_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_951_000 picoseconds. - Weight::from_parts(2_026_000, 0) + // Minimum execution time: 2_230_000 picoseconds. + Weight::from_parts(2_318_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_837_000 picoseconds. - Weight::from_parts(2_084_000, 0) + // Minimum execution time: 2_051_000 picoseconds. + Weight::from_parts(2_153_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_042_000 picoseconds. - Weight::from_parts(2_145_000, 0) + // Minimum execution time: 2_306_000 picoseconds. + Weight::from_parts(2_380_000, 0) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -273,22 +268,20 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn query_pallet() -> Weight { // Proof Size summary in bytes: - // Measured: `242` + // Measured: `171` // Estimated: `6196` - // Minimum execution time: 61_350_000 picoseconds. - Weight::from_parts(62_440_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 60_201_000 picoseconds. + Weight::from_parts(61_132_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } pub fn expect_pallet() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_993_000 picoseconds. - Weight::from_parts(5_309_000, 0) + // Minimum execution time: 4_554_000 picoseconds. + Weight::from_parts(4_704_000, 0) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -307,70 +300,68 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_transact_status() -> Weight { // Proof Size summary in bytes: - // Measured: `242` + // Measured: `171` // Estimated: `6196` - // Minimum execution time: 57_133_000 picoseconds. 
- Weight::from_parts(58_100_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 56_071_000 picoseconds. + Weight::from_parts(56_889_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } pub fn clear_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_899_000 picoseconds. - Weight::from_parts(2_153_000, 0) + // Minimum execution time: 2_093_000 picoseconds. + Weight::from_parts(2_169_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_880_000 picoseconds. - Weight::from_parts(1_960_000, 0) + // Minimum execution time: 2_027_000 picoseconds. + Weight::from_parts(2_172_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_825_000 picoseconds. - Weight::from_parts(1_960_000, 0) + // Minimum execution time: 2_035_000 picoseconds. + Weight::from_parts(2_164_000, 0) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `BridgeRococoToWococoMessages::PalletOperatingMode` (r:1 w:0) - // Proof: `BridgeRococoToWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - // Storage: `BridgeRococoToWococoMessages::OutboundLanes` (r:1 w:1) - // Proof: `BridgeRococoToWococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) - // Storage: `BridgeRococoToWococoMessages::OutboundLanesCongestedSignals` (r:1 w:0) - // Proof: `BridgeRococoToWococoMessages::OutboundLanesCongestedSignals` (`max_values`: Some(1), `max_size`: Some(21), added: 516, mode: `MaxEncodedLen`) - // Storage: `BridgeRococoToWococoMessages::OutboundMessages` (r:0 w:1) - // Proof: `BridgeRococoToWococoMessages::OutboundMessages` (`max_values`: None, `max_size`: Some(2621472), added: 2623947, mode: `MaxEncodedLen`) + // Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + // Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + // Storage: `BridgeWestendMessages::OutboundLanes` (r:1 w:1) + // Proof: `BridgeWestendMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) + // Storage: `BridgeWestendMessages::OutboundLanesCongestedSignals` (r:1 w:0) + // Proof: `BridgeWestendMessages::OutboundLanesCongestedSignals` (`max_values`: Some(1), `max_size`: Some(21), added: 516, mode: `MaxEncodedLen`) + // Storage: `BridgeWestendMessages::OutboundMessages` (r:0 w:1) + // Proof: `BridgeWestendMessages::OutboundMessages` (`max_values`: None, `max_size`: Some(2621472), added: 2623947, mode: `MaxEncodedLen`) /// The range of component `x` is `[1, 1000]`. pub fn export_message(x: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `139` - // Estimated: `3604` - // Minimum execution time: 28_419_000 picoseconds. 
- Weight::from_parts(29_387_791, 3604) - // Standard Error: 552 - .saturating_add(Weight::from_parts(316_277, 0).saturating_mul(x.into())) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `96` + // Estimated: `1529` + // Minimum execution time: 25_636_000 picoseconds. + Weight::from_parts(25_405_640, 1529) + // Standard Error: 321 + .saturating_add(Weight::from_parts(365_002, 0).saturating_mul(x.into())) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } pub fn set_fees_mode() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_903_000 picoseconds. - Weight::from_parts(2_023_000, 0) + // Minimum execution time: 2_036_000 picoseconds. + Weight::from_parts(2_136_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_963_000 picoseconds. - Weight::from_parts(2_143_000, 0) + // Minimum execution time: 2_147_000 picoseconds. + Weight::from_parts(2_276_000, 0) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs index 1b1e6f8ba71..1436c5b96a3 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs @@ -16,12 +16,11 @@ use super::{ AccountId, AllPalletsWithSystem, Balances, BaseDeliveryFee, FeeAssetId, ParachainInfo, - ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeFlavor, RuntimeOrigin, + ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, TransactionByteFee, WeightToFee, XcmpQueue, }; use crate::bridge_common_config::{ - BridgeGrandpaRococoInstance, BridgeGrandpaWestendInstance, BridgeGrandpaWococoInstance, - DeliveryRewardInBalance, RequiredStakeForStakeAndSlash, + BridgeGrandpaWestendInstance, DeliveryRewardInBalance, RequiredStakeForStakeAndSlash, }; use bp_messages::LaneId; use bp_relayers::{PayRewardFromAccount, RewardsAccountOwner, RewardsAccountParams}; @@ -60,9 +59,9 @@ use xcm_executor::{ }; parameter_types! { - pub storage Flavor: RuntimeFlavor = RuntimeFlavor::default(); pub const TokenLocation: MultiLocation = MultiLocation::parent(); pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); + pub RelayNetwork: NetworkId = NetworkId::Rococo; pub UniversalLocation: InteriorMultiLocation = X2(GlobalConsensus(RelayNetwork::get()), Parachain(ParachainInfo::parachain_id().into())); pub const MaxInstructions: u32 = 100; @@ -71,22 +70,6 @@ parameter_types! { pub RelayTreasuryLocation: MultiLocation = (Parent, PalletInstance(rococo_runtime_constants::TREASURY_PALLET_ID)).into(); } -/// Adapter for resolving `NetworkId` based on `pub storage Flavor: RuntimeFlavor`. -pub struct RelayNetwork; -impl Get> for RelayNetwork { - fn get() -> Option { - Some(Self::get()) - } -} -impl Get for RelayNetwork { - fn get() -> NetworkId { - match Flavor::get() { - RuntimeFlavor::Rococo => NetworkId::Rococo, - RuntimeFlavor::Wococo => NetworkId::Wococo, - } - } -} - /// Type for specifying how a `MultiLocation` can be converted into an `AccountId`. This is used /// when determining ownership of accounts for asset transacting and when attempting to use XCM /// `Transact` in order to determine the dispatch Origin. 
@@ -170,8 +153,7 @@ impl Contains for SafeCallFilter { RuntimeCall::System(frame_system::Call::set_storage { items }) if items.iter().all(|(k, _)| { k.eq(&DeliveryRewardInBalance::key()) | - k.eq(&RequiredStakeForStakeAndSlash::key()) | - k.eq(&Flavor::key()) + k.eq(&RequiredStakeForStakeAndSlash::key()) }) => return true, _ => (), @@ -199,17 +181,9 @@ impl Contains for SafeCallFilter { ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | RuntimeCall::XcmpQueue(..) | RuntimeCall::MessageQueue(..) | - RuntimeCall::BridgeRococoGrandpa(pallet_bridge_grandpa::Call::< - Runtime, - BridgeGrandpaRococoInstance, - >::initialize { .. }) | RuntimeCall::BridgeWestendGrandpa(pallet_bridge_grandpa::Call::< Runtime, BridgeGrandpaWestendInstance, - >::initialize { .. }) | - RuntimeCall::BridgeWococoGrandpa(pallet_bridge_grandpa::Call::< - Runtime, - BridgeGrandpaWococoInstance, >::initialize { .. }) ) } @@ -298,13 +272,6 @@ impl xcm_executor::Config for XcmConfig { type FeeManager = XcmFeeManagerFromComponents< WaivedLocations, ( - XcmExportFeeToRelayerRewardAccounts< - Self::AssetTransactor, - crate::bridge_to_wococo_config::WococoGlobalConsensusNetwork, - crate::bridge_to_wococo_config::AssetHubWococoParaId, - crate::bridge_to_wococo_config::BridgeHubWococoChainId, - crate::bridge_to_wococo_config::AssetHubRococoToAssetHubWococoMessagesLane, - >, XcmExportFeeToRelayerRewardAccounts< Self::AssetTransactor, crate::bridge_to_westend_config::WestendGlobalConsensusNetwork, @@ -312,21 +279,10 @@ impl xcm_executor::Config for XcmConfig { crate::bridge_to_westend_config::BridgeHubWestendChainId, crate::bridge_to_westend_config::AssetHubRococoToAssetHubWestendMessagesLane, >, - XcmExportFeeToRelayerRewardAccounts< - Self::AssetTransactor, - crate::bridge_to_rococo_config::RococoGlobalConsensusNetwork, - crate::bridge_to_rococo_config::AssetHubRococoParaId, - crate::bridge_to_rococo_config::BridgeHubRococoChainId, - crate::bridge_to_rococo_config::AssetHubWococoToAssetHubRococoMessagesLane, - >, XcmFeeToAccount, ), >; - type MessageExporter = ( - crate::bridge_to_westend_config::ToBridgeHubWestendHaulBlobExporter, - crate::bridge_to_wococo_config::ToBridgeHubWococoHaulBlobExporter, - crate::bridge_to_rococo_config::ToBridgeHubRococoHaulBlobExporter, - ); + type MessageExporter = (crate::bridge_to_westend_config::ToBridgeHubWestendHaulBlobExporter,); type UniversalAliases = Nothing; type CallDispatcher = WithOriginFilter; type SafeCallFilter = SafeCallFilter; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs index 39ee2576f5b..9597d71f6b2 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs @@ -18,8 +18,7 @@ use bp_polkadot_core::Signature; use bridge_hub_rococo_runtime::{ - bridge_common_config, bridge_to_rococo_config, bridge_to_westend_config, - bridge_to_wococo_config, + bridge_common_config, bridge_to_westend_config, xcm_config::{RelayNetwork, TokenLocation, XcmConfig}, AllPalletsWithoutSystem, BridgeRejectObsoleteHeadersAndMessages, Executive, ExistentialDeposit, ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, SessionKeys, SignedExtra, @@ -57,11 +56,7 @@ fn construct_extrinsic( frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(0), BridgeRejectObsoleteHeadersAndMessages::default(), - ( - 
bridge_to_wococo_config::OnBridgeHubRococoRefundBridgeHubWococoMessages::default(), - bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(), - bridge_to_rococo_config::OnBridgeHubWococoRefundBridgeHubRococoMessages::default(), - ), + (bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(),), ); let payload = SignedPayload::new(call.clone(), extra.clone()).unwrap(); let signature = payload.using_encoded(|e| sender.sign(e)); @@ -105,18 +100,13 @@ fn collator_session_keys() -> bridge_hub_test_utils::CollatorSessionKeys( - collator_session_keys(), - bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - Box::new(|call| RuntimeCall::BridgeWococoGrandpa(call).encode()), - ); // for Westend finality bridge_hub_test_utils::test_cases::initialize_bridge_by_governance_works::< Runtime, @@ -195,28 +176,6 @@ mod bridge_hub_rococo_tests { #[test] fn handle_export_message_from_system_parachain_add_to_outbound_queue_works() { - // for Wococo - bridge_hub_test_utils::test_cases::handle_export_message_from_system_parachain_to_outbound_queue_works::< - Runtime, - XcmConfig, - WithBridgeHubWococoMessagesInstance, - >( - collator_session_keys(), - bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - SIBLING_PARACHAIN_ID, - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::BridgeWococoMessages(event)) => Some(event), - _ => None, - } - }), - || ExportMessage { network: Wococo, destination: X1(Parachain(1234)), xcm: Xcm(vec![]) }, - XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WOCOCO, - Some((TokenLocation::get(), ExistentialDeposit::get()).into()), - // value should be >= than value generated by `can_calculate_weight_for_paid_export_message_with_reserve_transfer` - Some((TokenLocation::get(), bp_bridge_hub_rococo::BridgeHubRococoBaseXcmFeeInRocs::get()).into()), - || (), - ); // for Westend bridge_hub_test_utils::test_cases::handle_export_message_from_system_parachain_to_outbound_queue_works::< Runtime, @@ -243,41 +202,13 @@ mod bridge_hub_rococo_tests { #[test] fn message_dispatch_routing_works() { - // from Wococo - bridge_hub_test_utils::test_cases::message_dispatch_routing_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - ParachainSystem, - WithBridgeHubWococoMessagesInstance, - RelayNetwork, - WococoGlobalConsensusNetwork, - >( - collator_session_keys(), - bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - SIBLING_PARACHAIN_ID, - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::ParachainSystem(event)) => Some(event), - _ => None, - } - }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), - XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WOCOCO, - || (), - ); // from Westend bridge_hub_test_utils::test_cases::message_dispatch_routing_works::< Runtime, AllPalletsWithoutSystem, XcmConfig, ParachainSystem, - WithBridgeHubWococoMessagesInstance, + WithBridgeHubWestendMessagesInstance, RelayNetwork, WestendGlobalConsensusNetwork, >( @@ -303,25 +234,6 @@ mod bridge_hub_rococo_tests { #[test] fn relayed_incoming_message_works() { - // from Wococo - bridge_hub_test_utils::test_cases::relayed_incoming_message_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - ParachainSystem, - BridgeGrandpaWococoInstance, - BridgeParachainWococoInstance, - 
WithBridgeHubWococoMessagesInstance, - WithBridgeHubWococoMessageBridge, - >( - collator_session_keys(), - bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, - SIBLING_PARACHAIN_ID, - Rococo, - XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WOCOCO, - || (), - ); // from Westend bridge_hub_test_utils::test_cases::relayed_incoming_message_works::< Runtime, @@ -345,29 +257,6 @@ mod bridge_hub_rococo_tests { #[test] pub fn complex_relay_extrinsic_works() { - // for Wococo - bridge_hub_test_utils::test_cases::complex_relay_extrinsic_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - ParachainSystem, - BridgeGrandpaWococoInstance, - BridgeParachainWococoInstance, - WithBridgeHubWococoMessagesInstance, - WithBridgeHubWococoMessageBridge, - >( - collator_session_keys(), - bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, - SIBLING_PARACHAIN_ID, - BridgeHubWococoChainId::get(), - Rococo, - XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WOCOCO, - ExistentialDeposit::get(), - executive_init_block, - construct_and_apply_extrinsic, - || (), - ); // for Westend bridge_hub_test_utils::test_cases::complex_relay_extrinsic_works::< Runtime, @@ -457,275 +346,3 @@ mod bridge_hub_rococo_tests { ); } } - -mod bridge_hub_wococo_tests { - use super::*; - use bridge_common_config::{ - BridgeGrandpaRococoInstance, BridgeParachainRococoInstance, DeliveryRewardInBalance, - RequiredStakeForStakeAndSlash, - }; - use bridge_hub_rococo_runtime::{xcm_config, AllPalletsWithoutSystem, RuntimeFlavor}; - use bridge_to_rococo_config::{ - BridgeHubRococoChainId, RococoGlobalConsensusNetwork, WithBridgeHubRococoMessageBridge, - WithBridgeHubRococoMessagesInstance, XCM_LANE_FOR_ASSET_HUB_WOCOCO_TO_ASSET_HUB_ROCOCO, - }; - use frame_support::assert_ok; - - type RuntimeHelper = bridge_hub_test_utils::RuntimeHelper; - - pub(crate) fn set_wococo_flavor() { - let flavor_key = xcm_config::Flavor::key().to_vec(); - let flavor = RuntimeFlavor::Wococo; - - // encode `set_storage` call - let set_storage_call = RuntimeCall::System(frame_system::Call::::set_storage { - items: vec![(flavor_key, flavor.encode())], - }) - .encode(); - - // estimate - storing just 1 value - use frame_system::WeightInfo; - let require_weight_at_most = - ::SystemWeightInfo::set_storage(1); - - // execute XCM with Transact to `set_storage` as governance does - assert_ok!(RuntimeHelper::execute_as_governance(set_storage_call, require_weight_at_most) - .ensure_complete()); - - // check if stored - assert_eq!(flavor, xcm_config::Flavor::get()); - } - - bridge_hub_test_utils::test_cases::include_teleports_for_native_asset_works!( - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - CheckingAccount, - WeightToFee, - ParachainSystem, - collator_session_keys(), - ExistentialDeposit::get(), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), - _ => None, - } - }), - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID - ); - - #[test] - fn initialize_bridge_by_governance_works() { - bridge_hub_test_utils::test_cases::initialize_bridge_by_governance_works::< - Runtime, - BridgeGrandpaRococoInstance, - >( - collator_session_keys(), - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, - Box::new(|call| RuntimeCall::BridgeRococoGrandpa(call).encode()), - ) - } - - #[test] - fn change_delivery_reward_by_governance_works() { - 
bridge_hub_test_utils::test_cases::change_storage_constant_by_governance_works::< - Runtime, - DeliveryRewardInBalance, - u64, - >( - collator_session_keys(), - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, - Box::new(|call| RuntimeCall::System(call).encode()), - || (DeliveryRewardInBalance::key().to_vec(), DeliveryRewardInBalance::get()), - |old_value| old_value.checked_mul(2).unwrap(), - ) - } - - #[test] - fn change_required_stake_by_governance_works() { - bridge_hub_test_utils::test_cases::change_storage_constant_by_governance_works::< - Runtime, - RequiredStakeForStakeAndSlash, - Balance, - >( - collator_session_keys(), - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, - Box::new(|call| RuntimeCall::System(call).encode()), - || { - ( - RequiredStakeForStakeAndSlash::key().to_vec(), - RequiredStakeForStakeAndSlash::get(), - ) - }, - |old_value| old_value.checked_mul(2).unwrap(), - ) - } - - #[test] - fn handle_export_message_from_system_parachain_add_to_outbound_queue_works() { - bridge_hub_test_utils::test_cases::handle_export_message_from_system_parachain_to_outbound_queue_works::< - Runtime, - XcmConfig, - WithBridgeHubRococoMessagesInstance, - >( - collator_session_keys(), - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, - SIBLING_PARACHAIN_ID, - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::BridgeRococoMessages(event)) => Some(event), - _ => None, - } - }), - || ExportMessage { network: Rococo, destination: X1(Parachain(4321)), xcm: Xcm(vec![]) }, - XCM_LANE_FOR_ASSET_HUB_WOCOCO_TO_ASSET_HUB_ROCOCO, - Some((TokenLocation::get(), ExistentialDeposit::get()).into()), - // value should be >= than value generated by `can_calculate_weight_for_paid_export_message_with_reserve_transfer` - Some((TokenLocation::get(), bp_bridge_hub_wococo::BridgeHubWococoBaseXcmFeeInWocs::get()).into()), - set_wococo_flavor, - ) - } - - #[test] - fn message_dispatch_routing_works() { - bridge_hub_test_utils::test_cases::message_dispatch_routing_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - ParachainSystem, - WithBridgeHubRococoMessagesInstance, - RelayNetwork, - RococoGlobalConsensusNetwork, - >( - collator_session_keys(), - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, - SIBLING_PARACHAIN_ID, - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::ParachainSystem(event)) => Some(event), - _ => None, - } - }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), - XCM_LANE_FOR_ASSET_HUB_WOCOCO_TO_ASSET_HUB_ROCOCO, - set_wococo_flavor, - ) - } - - #[test] - fn relayed_incoming_message_works() { - bridge_hub_test_utils::test_cases::relayed_incoming_message_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - ParachainSystem, - BridgeGrandpaRococoInstance, - BridgeParachainRococoInstance, - WithBridgeHubRococoMessagesInstance, - WithBridgeHubRococoMessageBridge, - >( - collator_session_keys(), - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, - bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - SIBLING_PARACHAIN_ID, - Wococo, - XCM_LANE_FOR_ASSET_HUB_WOCOCO_TO_ASSET_HUB_ROCOCO, - set_wococo_flavor, - ) - } - - #[test] - pub fn complex_relay_extrinsic_works() { - bridge_hub_test_utils::test_cases::complex_relay_extrinsic_works::< - Runtime, - AllPalletsWithoutSystem, - 
XcmConfig, - ParachainSystem, - BridgeGrandpaRococoInstance, - BridgeParachainRococoInstance, - WithBridgeHubRococoMessagesInstance, - WithBridgeHubRococoMessageBridge, - >( - collator_session_keys(), - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, - bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - SIBLING_PARACHAIN_ID, - BridgeHubRococoChainId::get(), - Wococo, - XCM_LANE_FOR_ASSET_HUB_WOCOCO_TO_ASSET_HUB_ROCOCO, - ExistentialDeposit::get(), - executive_init_block, - construct_and_apply_extrinsic, - set_wococo_flavor, - ); - } - - #[test] - pub fn can_calculate_weight_for_paid_export_message_with_reserve_transfer() { - let estimated = bridge_hub_test_utils::test_cases::can_calculate_weight_for_paid_export_message_with_reserve_transfer::< - Runtime, - XcmConfig, - WeightToFee, - >(); - - // check if estimated value is sane - let max_expected = bp_bridge_hub_wococo::BridgeHubWococoBaseXcmFeeInWocs::get(); - assert!( - estimated <= max_expected, - "calculated: {:?}, max_expected: {:?}, please adjust `bp_bridge_hub_wococo::BridgeHubWococoBaseXcmFeeInWocs` value", - estimated, - max_expected - ); - } - - #[test] - pub fn can_calculate_fee_for_complex_message_delivery_transaction() { - let estimated = bridge_hub_test_utils::test_cases::can_calculate_fee_for_complex_message_delivery_transaction::< - Runtime, - BridgeGrandpaRococoInstance, - BridgeParachainRococoInstance, - WithBridgeHubRococoMessagesInstance, - WithBridgeHubRococoMessageBridge, - >( - collator_session_keys(), - construct_and_estimate_extrinsic_fee - ); - - // check if estimated value is sane - let max_expected = bp_bridge_hub_wococo::BridgeHubWococoBaseDeliveryFeeInWocs::get(); - assert!( - estimated <= max_expected, - "calculated: {:?}, max_expected: {:?}, please adjust `bp_bridge_hub_wococo::BridgeHubWococoBaseDeliveryFeeInWocs` value", - estimated, - max_expected - ); - } - - #[test] - pub fn can_calculate_fee_for_complex_message_confirmation_transaction() { - let estimated = bridge_hub_test_utils::test_cases::can_calculate_fee_for_complex_message_confirmation_transaction::< - Runtime, - BridgeGrandpaRococoInstance, - BridgeParachainRococoInstance, - WithBridgeHubRococoMessagesInstance, - WithBridgeHubRococoMessageBridge, - >( - collator_session_keys(), - construct_and_estimate_extrinsic_fee - ); - - // check if estimated value is sane - let max_expected = bp_bridge_hub_wococo::BridgeHubWococoBaseConfirmationFeeInWocs::get(); - assert!( - estimated <= max_expected, - "calculated: {:?}, max_expected: {:?}, please adjust `bp_bridge_hub_wococo::BridgeHubWococoBaseConfirmationFeeInWocs` value", - estimated, - max_expected - ); - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml index 18181ed3e05..bd171be53bf 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml @@ -41,8 +41,6 @@ xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} # Bridges -bp-bridge-hub-rococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-rococo", default-features = false } -bp-bridge-hub-wococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-wococo", default-features = false } bp-header-chain = { path = "../../../../../bridges/primitives/header-chain", default-features = false } 
bp-messages = { path = "../../../../../bridges/primitives/messages", default-features = false } bp-parachains = { path = "../../../../../bridges/primitives/parachains", default-features = false } @@ -60,8 +58,6 @@ bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common", de default = [ "std" ] std = [ "asset-test-utils/std", - "bp-bridge-hub-rococo/std", - "bp-bridge-hub-wococo/std", "bp-header-chain/std", "bp-messages/std", "bp-parachains/std", diff --git a/cumulus/polkadot-parachain/src/chain_spec/asset_hubs.rs b/cumulus/polkadot-parachain/src/chain_spec/asset_hubs.rs index b4a73ff8aaa..a8d3d2975ad 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/asset_hubs.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/asset_hubs.rs @@ -32,7 +32,6 @@ pub type AssetHubWestendChainSpec = sc_service::GenericChainSpec; pub type AssetHubRococoChainSpec = sc_service::GenericChainSpec; -pub type AssetHubWococoChainSpec = AssetHubRococoChainSpec; const ASSET_HUB_POLKADOT_ED: AssetHubBalance = parachains_common::polkadot::currency::EXISTENTIAL_DEPOSIT; @@ -433,6 +432,7 @@ pub fn asset_hub_westend_development_config() -> AssetHubWestendChainSpec { get_account_id_from_seed::("Alice//stash"), get_account_id_from_seed::("Bob//stash"), ], + parachains_common::westend::currency::UNITS * 1_000_000, 1000.into(), )) .with_properties(properties) @@ -478,6 +478,7 @@ pub fn asset_hub_westend_local_config() -> AssetHubWestendChainSpec { get_account_id_from_seed::("Eve//stash"), get_account_id_from_seed::("Ferdie//stash"), ], + parachains_common::westend::currency::UNITS * 1_000_000, 1000.into(), )) .with_properties(properties) @@ -522,6 +523,7 @@ pub fn asset_hub_westend_config() -> AssetHubWestendChainSpec { ), ], Vec::new(), + ASSET_HUB_WESTEND_ED * 4096, 1000.into(), )) .with_properties(properties) @@ -531,6 +533,7 @@ pub fn asset_hub_westend_config() -> AssetHubWestendChainSpec { fn asset_hub_westend_genesis( invulnerables: Vec<(AccountId, AuraId)>, endowed_accounts: Vec, + endowment: AssetHubBalance, id: ParaId, ) -> serde_json::Value { serde_json::json!({ @@ -538,7 +541,7 @@ fn asset_hub_westend_genesis( "balances": endowed_accounts .iter() .cloned() - .map(|k| (k, ASSET_HUB_WESTEND_ED * 4096)) + .map(|k| (k, endowment)) .collect::>(), }, "parachainInfo": { @@ -579,19 +582,6 @@ pub fn asset_hub_rococo_development_config() -> AssetHubRococoChainSpec { ) } -pub fn asset_hub_wococo_development_config() -> AssetHubWococoChainSpec { - let mut properties = sc_chain_spec::Properties::new(); - properties.insert("ss58Format".into(), 42.into()); - properties.insert("tokenSymbol".into(), "WOC".into()); - properties.insert("tokenDecimals".into(), 12.into()); - asset_hub_rococo_like_development_config( - properties, - "Wococo Asset Hub Development", - "asset-hub-wococo-dev", - 1000, - ) -} - fn asset_hub_rococo_like_development_config( properties: sc_chain_spec::Properties, name: &str, @@ -617,6 +607,7 @@ fn asset_hub_rococo_like_development_config( get_account_id_from_seed::("Alice//stash"), get_account_id_from_seed::("Bob//stash"), ], + parachains_common::rococo::currency::UNITS * 1_000_000, para_id.into(), )) .with_properties(properties) @@ -636,19 +627,6 @@ pub fn asset_hub_rococo_local_config() -> AssetHubRococoChainSpec { ) } -pub fn asset_hub_wococo_local_config() -> AssetHubWococoChainSpec { - let mut properties = sc_chain_spec::Properties::new(); - properties.insert("ss58Format".into(), 42.into()); - properties.insert("tokenSymbol".into(), "WOC".into()); - 
properties.insert("tokenDecimals".into(), 12.into()); - asset_hub_rococo_like_local_config( - properties, - "Wococo Asset Hub Local", - "asset-hub-wococo-local", - 1000, - ) -} - fn asset_hub_rococo_like_local_config( properties: sc_chain_spec::Properties, name: &str, @@ -688,6 +666,7 @@ fn asset_hub_rococo_like_local_config( get_account_id_from_seed::("Eve//stash"), get_account_id_from_seed::("Ferdie//stash"), ], + parachains_common::rococo::currency::UNITS * 1_000_000, para_id.into(), )) .with_properties(properties) @@ -735,54 +714,7 @@ pub fn asset_hub_rococo_genesis_config() -> AssetHubRococoChainSpec { ), ], Vec::new(), - para_id.into(), - )) - .with_properties(properties) - .build() -} - -pub fn asset_hub_wococo_genesis_config() -> AssetHubWococoChainSpec { - let mut properties = sc_chain_spec::Properties::new(); - properties.insert("ss58Format".into(), 42.into()); - properties.insert("tokenSymbol".into(), "WOC".into()); - properties.insert("tokenDecimals".into(), 12.into()); - let para_id = 1000; - AssetHubRococoChainSpec::builder( - asset_hub_rococo_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), - Extensions { relay_chain: "wococo".into(), para_id }, - ) - .with_name("Wococo Asset Hub") - .with_id("asset-hub-wococo") - .with_chain_type(ChainType::Live) - .with_genesis_config_patch(asset_hub_rococo_genesis( - // initial collators. - vec![ - // 5C8RGkS8t5K93fB2hkgKbvSYs5iG6AknJMuQmbBDeazon9Lj - ( - hex!("02d526f43cf27e94f478f9db785dc86052a77c695e7c855211839d3fde3ce534").into(), - hex!("02d526f43cf27e94f478f9db785dc86052a77c695e7c855211839d3fde3ce534") - .unchecked_into(), - ), - // 5GePeDZQeBagXH7kH5QPKnQKi39Z5hoYFB5FmUtEvc4yxKej - ( - hex!("caa1f623ca183296c4521b56cc29c484ca017830f8cb538f30f2d4664d631814").into(), - hex!("caa1f623ca183296c4521b56cc29c484ca017830f8cb538f30f2d4664d631814") - .unchecked_into(), - ), - // 5CfnTTb9NMJDNKDntA83mHKoedZ7wjDC8ypLCTDd4NwUx3zv - ( - hex!("1ac112d635db2bd34e79ae2b99486cf7c0b71a928668e4feb3dc4633d368f965").into(), - hex!("1ac112d635db2bd34e79ae2b99486cf7c0b71a928668e4feb3dc4633d368f965") - .unchecked_into(), - ), - // 5EqheiwiG22gvGpN7cvrbeaQzhg7rzsYYVkYK4yj5vRrTQRQ - ( - hex!("7ac9d11be07334cd27e9eb849f5fc7677a10ad36b6ab38b377d3c8b2c0b08b66").into(), - hex!("7ac9d11be07334cd27e9eb849f5fc7677a10ad36b6ab38b377d3c8b2c0b08b66") - .unchecked_into(), - ), - ], - Vec::new(), + ASSET_HUB_ROCOCO_ED * 524_288, para_id.into(), )) .with_properties(properties) @@ -792,6 +724,7 @@ pub fn asset_hub_wococo_genesis_config() -> AssetHubWococoChainSpec { fn asset_hub_rococo_genesis( invulnerables: Vec<(AccountId, AuraId)>, endowed_accounts: Vec, + endowment: AssetHubBalance, id: ParaId, ) -> serde_json::Value { serde_json::json!({ @@ -799,7 +732,7 @@ fn asset_hub_rococo_genesis( balances: endowed_accounts .iter() .cloned() - .map(|k| (k, ASSET_HUB_ROCOCO_ED * 524_288)) + .map(|k| (k, endowment)) .collect(), }, "parachainInfo": asset_hub_rococo_runtime::ParachainInfoConfig { diff --git a/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs b/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs index 71fb9e5b140..94cef106001 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs @@ -29,9 +29,6 @@ pub enum BridgeHubRuntimeType { // used by benchmarks RococoDevelopment, - Wococo, - WococoLocal, - Kusama, KusamaLocal, // used by benchmarks @@ -66,8 +63,6 @@ impl FromStr for BridgeHubRuntimeType { rococo::BRIDGE_HUB_ROCOCO => 
Ok(BridgeHubRuntimeType::Rococo), rococo::BRIDGE_HUB_ROCOCO_LOCAL => Ok(BridgeHubRuntimeType::RococoLocal), rococo::BRIDGE_HUB_ROCOCO_DEVELOPMENT => Ok(BridgeHubRuntimeType::RococoDevelopment), - wococo::BRIDGE_HUB_WOCOCO => Ok(BridgeHubRuntimeType::Wococo), - wococo::BRIDGE_HUB_WOCOCO_LOCAL => Ok(BridgeHubRuntimeType::WococoLocal), _ => Err(format!("Value '{}' is not configured yet", value)), } } @@ -94,8 +89,6 @@ impl BridgeHubRuntimeType { BridgeHubRuntimeType::RococoLocal | BridgeHubRuntimeType::RococoDevelopment => Ok(Box::new(rococo::BridgeHubChainSpec::from_json_file(path)?)), - BridgeHubRuntimeType::Wococo | BridgeHubRuntimeType::WococoLocal => - Ok(Box::new(wococo::BridgeHubChainSpec::from_json_file(path)?)), } } @@ -171,17 +164,6 @@ impl BridgeHubRuntimeType { Some("Bob".to_string()), |_| (), ))), - BridgeHubRuntimeType::Wococo => - Ok(Box::new(wococo::BridgeHubChainSpec::from_json_bytes( - &include_bytes!("../../chain-specs/bridge-hub-wococo.json")[..], - )?)), - BridgeHubRuntimeType::WococoLocal => Ok(Box::new(wococo::local_config( - wococo::BRIDGE_HUB_WOCOCO_LOCAL, - "Wococo BridgeHub Local", - "wococo-local", - ParaId::new(1014), - Some("Bob".to_string()), - ))), } } } @@ -309,22 +291,9 @@ pub mod rococo { "polkadotXcm": { "safeXcmVersion": Some(SAFE_XCM_VERSION), }, - - "bridgeWococoGrandpa": { - "owner": bridges_pallet_owner.clone(), - }, "bridgeWestendGrandpa": { "owner": bridges_pallet_owner.clone(), }, - "bridgeRococoGrandpa": { - "owner": bridges_pallet_owner.clone(), - }, - "bridgeRococoMessages": { - "owner": bridges_pallet_owner.clone(), - }, - "bridgeWococoMessages": { - "owner": bridges_pallet_owner.clone(), - }, "bridgeWestendMessages": { "owner": bridges_pallet_owner.clone(), }, @@ -332,37 +301,6 @@ pub mod rococo { } } -/// Sub-module for Wococo setup (reuses stuff from Rococo) -pub mod wococo { - use super::ParaId; - use crate::chain_spec::bridge_hubs::rococo; - - pub(crate) const BRIDGE_HUB_WOCOCO: &str = "bridge-hub-wococo"; - pub(crate) const BRIDGE_HUB_WOCOCO_LOCAL: &str = "bridge-hub-wococo-local"; - - pub type BridgeHubChainSpec = rococo::BridgeHubChainSpec; - pub type RuntimeApi = rococo::RuntimeApi; - - pub fn local_config( - id: &str, - chain_name: &str, - relay_chain: &str, - para_id: ParaId, - bridges_pallet_owner_seed: Option, - ) -> BridgeHubChainSpec { - rococo::local_config( - id, - chain_name, - relay_chain, - para_id, - bridges_pallet_owner_seed, - |properties| { - properties.insert("tokenSymbol".into(), "WOOK".into()); - }, - ) - } -} - /// Sub-module for Kusama setup pub mod kusama { use super::{BridgeHubBalance, ParaId}; diff --git a/cumulus/polkadot-parachain/src/command.rs b/cumulus/polkadot-parachain/src/command.rs index 2799175b8ee..7bede22fea7 100644 --- a/cumulus/polkadot-parachain/src/command.rs +++ b/cumulus/polkadot-parachain/src/command.rs @@ -43,7 +43,6 @@ enum Runtime { AssetHubPolkadot, AssetHubKusama, AssetHubRococo, - AssetHubWococo, AssetHubWestend, Penpal(ParaId), ContractsRococo, @@ -95,8 +94,6 @@ fn runtime(id: &str) -> Runtime { Runtime::AssetHubKusama } else if id.starts_with("asset-hub-rococo") { Runtime::AssetHubRococo - } else if id.starts_with("asset-hub-wococo") { - Runtime::AssetHubWococo } else if id.starts_with("asset-hub-westend") | id.starts_with("westmint") { Runtime::AssetHubWestend } else if id.starts_with("penpal") { @@ -186,19 +183,6 @@ fn load_spec(id: &str) -> std::result::Result, String> { &include_bytes!("../chain-specs/asset-hub-rococo.json")[..], )?), - // -- Asset Hub Wococo - "asset-hub-wococo-dev" 
=> - Box::new(chain_spec::asset_hubs::asset_hub_wococo_development_config()), - "asset-hub-wococo-local" => - Box::new(chain_spec::asset_hubs::asset_hub_wococo_local_config()), - // the chain spec as used for generating the upgrade genesis values - "asset-hub-wococo-genesis" => - Box::new(chain_spec::asset_hubs::asset_hub_wococo_genesis_config()), - "asset-hub-wococo" => - Box::new(chain_spec::asset_hubs::AssetHubWococoChainSpec::from_json_bytes( - &include_bytes!("../chain-specs/asset-hub-wococo.json")[..], - )?), - // -- Asset Hub Westend "asset-hub-westend-dev" | "westmint-dev" => Box::new(chain_spec::asset_hubs::asset_hub_westend_development_config()), @@ -302,8 +286,6 @@ fn load_spec(id: &str) -> std::result::Result, String> { Box::new(chain_spec::asset_hubs::AssetHubKusamaChainSpec::from_json_file(path)?), Runtime::AssetHubRococo => Box::new(chain_spec::asset_hubs::AssetHubRococoChainSpec::from_json_file(path)?), - Runtime::AssetHubWococo => - Box::new(chain_spec::asset_hubs::AssetHubWococoChainSpec::from_json_file(path)?), Runtime::AssetHubWestend => Box::new( chain_spec::asset_hubs::AssetHubWestendChainSpec::from_json_file(path)?, ), @@ -464,7 +446,7 @@ macro_rules! construct_partials { )?; $code }, - Runtime::AssetHubRococo | Runtime::AssetHubWococo => { + Runtime::AssetHubRococo => { let $partials = new_partial::( &$config, crate::service::aura_build_import_queue::<_, AuraId>, @@ -522,14 +504,6 @@ macro_rules! construct_partials { )?; $code }, - chain_spec::bridge_hubs::BridgeHubRuntimeType::Wococo | - chain_spec::bridge_hubs::BridgeHubRuntimeType::WococoLocal => { - let $partials = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - $code - }, }, Runtime::CollectivesPolkadot => { let $partials = new_partial::( @@ -605,7 +579,7 @@ macro_rules! construct_async_run { { $( $code )* }.map(|v| (v, task_manager)) }) }, - Runtime::AssetHubRococo | Runtime::AssetHubWococo => { + Runtime::AssetHubRococo => { runner.async_run(|$config| { let $components = new_partial::( &$config, @@ -739,18 +713,6 @@ macro_rules! 
construct_async_run { { $( $code )* }.map(|v| (v, task_manager)) }) }, - chain_spec::bridge_hubs::BridgeHubRuntimeType::Wococo | - chain_spec::bridge_hubs::BridgeHubRuntimeType::WococoLocal => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - } } }, Runtime::Penpal(_) | Runtime::Default => { @@ -978,7 +940,7 @@ pub fn run() -> Result<()> { .await .map(|r| r.0) .map_err(Into::into), - Runtime::AssetHubRococo | Runtime::AssetHubWococo => crate::service::start_asset_hub_node::< + Runtime::AssetHubRococo => crate::service::start_asset_hub_node::< asset_hub_rococo_runtime::RuntimeApi, AuraId, >(config, polkadot_config, collator_options, id, hwbench) @@ -1077,14 +1039,6 @@ chain_spec::bridge_hubs::BridgeHubRuntimeType::Polkadot | >(config, polkadot_config, collator_options, id, hwbench) .await .map(|r| r.0), - chain_spec::bridge_hubs::BridgeHubRuntimeType::Wococo | - chain_spec::bridge_hubs::BridgeHubRuntimeType::WococoLocal => - crate::service::start_generic_aura_node::< - chain_spec::bridge_hubs::wococo::RuntimeApi, - AuraId, - >(config, polkadot_config, collator_options, id, hwbench) - .await - .map(|r| r.0), } .map_err(Into::into), Runtime::Penpal(_) | Runtime::Default => diff --git a/cumulus/scripts/bridges_rococo_westend.sh b/cumulus/scripts/bridges_rococo_westend.sh index 82b5f1942b2..9b3bd350276 100755 --- a/cumulus/scripts/bridges_rococo_westend.sh +++ b/cumulus/scripts/bridges_rococo_westend.sh @@ -170,8 +170,6 @@ function run_relay() { --bridge-hub-rococo-port 8943 \ --bridge-hub-rococo-version-mode Auto \ --bridge-hub-rococo-signer //Charlie \ - --westend-headers-to-bridge-hub-rococo-signer //Bob \ - --westend-parachains-to-bridge-hub-rococo-signer //Bob \ --bridge-hub-rococo-transactions-mortality 4 \ --westend-host localhost \ --westend-port 9945 \ @@ -180,8 +178,6 @@ function run_relay() { --bridge-hub-westend-port 8945 \ --bridge-hub-westend-version-mode Auto \ --bridge-hub-westend-signer //Charlie \ - --rococo-headers-to-bridge-hub-westend-signer //Bob \ - --rococo-parachains-to-bridge-hub-westend-signer //Bob \ --bridge-hub-westend-transactions-mortality 4 \ --lane "${LANE_ID}" } @@ -209,7 +205,7 @@ case "$1" in "ws://127.0.0.1:9910" \ "//Alice" \ "$GLOBAL_CONSENSUS_WESTEND_ASSET_HUB_WESTEND_1000_SOVEREIGN_ACCOUNT" \ - $((1000000000 + 50000000000 * 20)) + $((1000000000000 + 50000000000 * 20)) # HRMP open_hrmp_channels \ "ws://127.0.0.1:9942" \ @@ -227,19 +223,19 @@ case "$1" in "ws://127.0.0.1:8943" \ "//Alice" \ "$ASSET_HUB_ROCOCO_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_ROCOCO" \ - $((1000000000 + 50000000000 * 20)) + $((1000000000000 + 50000000000 * 20)) # drip SA of lane dedicated to asset hub for paying rewards for delivery transfer_balance \ "ws://127.0.0.1:8943" \ "//Alice" \ "$ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhwd_ThisChain" \ - $((1000000000 + 2000000000000)) + $((1000000000000 + 2000000000000)) # drip SA of lane dedicated to asset hub for paying rewards for delivery confirmation transfer_balance \ "ws://127.0.0.1:8943" \ "//Alice" \ "$ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhwd_BridgedChain" \ - $((1000000000 + 2000000000000)) + $((1000000000000 + 2000000000000)) ;; init-asset-hub-westend-local) ensure_polkadot_js_api @@ -258,7 +254,7 @@ case "$1" in "ws://127.0.0.1:9010" \ "//Alice" \ 
"$GLOBAL_CONSENSUS_ROCOCO_ASSET_HUB_ROCOCO_1000_SOVEREIGN_ACCOUNT" \ - $((1000000000 + 50000000000 * 20)) + $((1000000000000000 + 50000000000 * 20)) # HRMP open_hrmp_channels \ "ws://127.0.0.1:9945" \ @@ -275,19 +271,19 @@ case "$1" in "ws://127.0.0.1:8945" \ "//Alice" \ "$ASSET_HUB_WESTEND_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_WESTEND" \ - $((1000000000 + 50000000000 * 20)) + $((1000000000000000 + 50000000000 * 20)) # drip SA of lane dedicated to asset hub for paying rewards for delivery transfer_balance \ "ws://127.0.0.1:8945" \ "//Alice" \ "$ON_BRIDGE_HUB_WESTEND_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhro_ThisChain" \ - $((1000000000 + 2000000000000)) + $((1000000000000000 + 2000000000000)) # drip SA of lane dedicated to asset hub for paying rewards for delivery confirmation transfer_balance \ "ws://127.0.0.1:8945" \ "//Alice" \ "$ON_BRIDGE_HUB_WESTEND_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhro_BridgedChain" \ - $((1000000000 + 2000000000000)) + $((1000000000000000 + 2000000000000)) ;; reserve-transfer-assets-from-asset-hub-rococo-local) ensure_polkadot_js_api @@ -309,7 +305,7 @@ case "$1" in "//Alice" \ "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Westend" }, { "Parachain": 1000 } ] } } }')" \ "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ - "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Westend" } } } }, "fun": { "Fungible": 140000000000 } } ] }')" \ + "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Westend" } } } }, "fun": { "Fungible": 40000000000 } } ] }')" \ 0 \ "Unlimited" ;; diff --git a/cumulus/scripts/bridges_rococo_wococo.sh b/cumulus/scripts/bridges_rococo_wococo.sh deleted file mode 100755 index dd7c7062a3b..00000000000 --- a/cumulus/scripts/bridges_rococo_wococo.sh +++ /dev/null @@ -1,386 +0,0 @@ -#!/bin/bash - -# import common functions -source "$(dirname "$0")"/bridges_common.sh - -# Expected sovereign accounts. -# -# Generated by: -# -# #[test] -# fn generate_sovereign_accounts() { -# use sp_core::crypto::Ss58Codec; -# use polkadot_parachain_primitives::primitives::Sibling; -# -# parameter_types! 
{ -# pub UniversalLocationAHR: InteriorMultiLocation = X2(GlobalConsensus(Rococo), Parachain(1000)); -# pub UniversalLocationAHW: InteriorMultiLocation = X2(GlobalConsensus(Wococo), Parachain(1000)); -# } -# -# // SS58=42 -# println!("GLOBAL_CONSENSUS_ROCOCO_SOVEREIGN_ACCOUNT=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# GlobalConsensusConvertsFor::::convert_location( -# &MultiLocation { parents: 2, interior: X1(GlobalConsensus(Rococo)) }).unwrap() -# ).to_ss58check_with_version(42_u16.into()) -# ); -# println!("GLOBAL_CONSENSUS_ROCOCO_ASSET_HUB_ROCOCO_1000_SOVEREIGN_ACCOUNT=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# GlobalConsensusParachainConvertsFor::::convert_location( -# &MultiLocation { parents: 2, interior: X2(GlobalConsensus(Rococo), Parachain(1000)) }).unwrap() -# ).to_ss58check_with_version(42_u16.into()) -# ); -# println!("ASSET_HUB_WOCOCO_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_WOCOCO=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# SiblingParachainConvertsVia::::convert_location( -# &MultiLocation { parents: 1, interior: X1(Parachain(1000)) }).unwrap() -# ).to_ss58check_with_version(42_u16.into()) -# ); -# -# // SS58=42 -# println!("GLOBAL_CONSENSUS_WOCOCO_SOVEREIGN_ACCOUNT=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# GlobalConsensusConvertsFor::::convert_location( -# &MultiLocation { parents: 2, interior: X1(GlobalConsensus(Wococo)) }).unwrap() -# ).to_ss58check_with_version(42_u16.into()) -# ); -# println!("GLOBAL_CONSENSUS_WOCOCO_ASSET_HUB_WOCOCO_1000_SOVEREIGN_ACCOUNT=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# GlobalConsensusParachainConvertsFor::::convert_location( -# &MultiLocation { parents: 2, interior: X2(GlobalConsensus(Wococo), Parachain(1000)) }).unwrap() -# ).to_ss58check_with_version(42_u16.into()) -# ); -# println!("ASSET_HUB_ROCOCO_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_ROCOCO=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# SiblingParachainConvertsVia::::convert_location( -# &MultiLocation { parents: 1, interior: X1(Parachain(1000)) }).unwrap() -# ).to_ss58check_with_version(42_u16.into()) -# ); -# } -GLOBAL_CONSENSUS_ROCOCO_SOVEREIGN_ACCOUNT="5GxRGwT8bU1JeBPTUXc7LEjZMxNrK8MyL2NJnkWFQJTQ4sii" -GLOBAL_CONSENSUS_ROCOCO_ASSET_HUB_ROCOCO_1000_SOVEREIGN_ACCOUNT="5CfNu7eH3SJvqqPt3aJh38T8dcFvhGzEohp9tsd41ANhXDnQ" -ASSET_HUB_WOCOCO_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_WOCOCO="5Eg2fntNprdN3FgH4sfEaaZhYtddZQSQUqvYJ1f2mLtinVhV" -GLOBAL_CONSENSUS_WOCOCO_SOVEREIGN_ACCOUNT="5EWw2NzfPr2DCahourp33cya6bGWEJViTnJN6Z2ruFevpJML" -GLOBAL_CONSENSUS_WOCOCO_ASSET_HUB_WOCOCO_1000_SOVEREIGN_ACCOUNT="5EJX8L4dwGyYnCsjZ91LfWAsm3rCN8vY2AYvT4mauMEjsrQz" -ASSET_HUB_ROCOCO_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_ROCOCO="5Eg2fntNprdN3FgH4sfEaaZhYtddZQSQUqvYJ1f2mLtinVhV" - -# Expected sovereign accounts for rewards on BridgeHubs. 
-# -# Generated by: -#[test] -#fn generate_sovereign_accounts_for_rewards() { -# use bp_messages::LaneId; -# use bp_relayers::{PayRewardFromAccount, RewardsAccountOwner, RewardsAccountParams}; -# use sp_core::crypto::Ss58Codec; -# -# // SS58=42 -# println!( -# "ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhwo_ThisChain=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# PayRewardFromAccount::<[u8; 32], [u8; 32]>::rewards_account(RewardsAccountParams::new( -# LaneId([0, 0, 0, 1]), -# *b"bhwo", -# RewardsAccountOwner::ThisChain -# )) -# ) -# .to_ss58check_with_version(42_u16.into()) -# ); -# // SS58=42 -# println!( -# "ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhwo_BridgedChain=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# PayRewardFromAccount::<[u8; 32], [u8; 32]>::rewards_account(RewardsAccountParams::new( -# LaneId([0, 0, 0, 1]), -# *b"bhwo", -# RewardsAccountOwner::BridgedChain -# )) -# ) -# .to_ss58check_with_version(42_u16.into()) -# ); -# -# // SS58=42 -# println!( -# "ON_BRIDGE_HUB_WOCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhro_ThisChain=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# PayRewardFromAccount::<[u8; 32], [u8; 32]>::rewards_account(RewardsAccountParams::new( -# LaneId([0, 0, 0, 1]), -# *b"bhro", -# RewardsAccountOwner::ThisChain -# )) -# ) -# .to_ss58check_with_version(42_u16.into()) -# ); -# // SS58=42 -# println!( -# "ON_BRIDGE_HUB_WOCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhro_BridgedChain=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# PayRewardFromAccount::<[u8; 32], [u8; 32]>::rewards_account(RewardsAccountParams::new( -# LaneId([0, 0, 0, 1]), -# *b"bhro", -# RewardsAccountOwner::BridgedChain -# )) -# ) -# .to_ss58check_with_version(42_u16.into()) -# ); -#} -ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhwo_ThisChain="5EHnXaT5BhiS8YRPMeHi97YHofTtNx4pLNb8wR8TwjVq1gzU" -ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhwo_BridgedChain="5EHnXaT5BhiS8YRPMeHyt95svA95qWAh53XeVMpJQZNZHAzj" -ON_BRIDGE_HUB_WOCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhro_ThisChain="5EHnXaT5BhiS8YRNuCukWXTQdAqARjjXmpjehjx1YZNE5keZ" -ON_BRIDGE_HUB_WOCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhro_BridgedChain="5EHnXaT5BhiS8YRNuCv2FYzzjfWMtHqQWVgAFgdr1PExMN94" - -LANE_ID="00000001" - -function init_ro_wo() { - ensure_relayer - - RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ - ~/local_bridge_testing/bin/substrate-relay init-bridge rococo-to-bridge-hub-wococo \ - --source-host localhost \ - --source-port 9942 \ - --source-version-mode Auto \ - --target-host localhost \ - --target-port 8945 \ - --target-version-mode Auto \ - --target-signer //Bob -} - -function init_wo_ro() { - ensure_relayer - - RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ - ~/local_bridge_testing/bin/substrate-relay init-bridge wococo-to-bridge-hub-rococo \ - --source-host localhost \ - --source-port 9945 \ - --source-version-mode Auto \ - --target-host localhost \ - --target-port 8943 \ - --target-version-mode Auto \ - --target-signer //Bob -} - -function run_relay() { - ensure_relayer - - RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ - ~/local_bridge_testing/bin/substrate-relay relay-headers-and-messages bridge-hub-rococo-bridge-hub-wococo \ - --rococo-host localhost \ - --rococo-port 9942 \ - --rococo-version-mode Auto \ - --bridge-hub-rococo-host localhost \ - --bridge-hub-rococo-port 8943 \ - --bridge-hub-rococo-version-mode Auto \ - --bridge-hub-rococo-signer //Charlie \ - --wococo-headers-to-bridge-hub-rococo-signer //Bob 
\ - --wococo-parachains-to-bridge-hub-rococo-signer //Bob \ - --bridge-hub-rococo-transactions-mortality 4 \ - --wococo-host localhost \ - --wococo-port 9945 \ - --wococo-version-mode Auto \ - --bridge-hub-wococo-host localhost \ - --bridge-hub-wococo-port 8945 \ - --bridge-hub-wococo-version-mode Auto \ - --bridge-hub-wococo-signer //Charlie \ - --rococo-headers-to-bridge-hub-wococo-signer //Bob \ - --rococo-parachains-to-bridge-hub-wococo-signer //Bob \ - --bridge-hub-wococo-transactions-mortality 4 \ - --lane "${LANE_ID}" -} - -case "$1" in - run-relay) - init_ro_wo - init_wo_ro - run_relay - ;; - init-asset-hub-rococo-local) - ensure_polkadot_js_api - # create foreign assets for native Wococo token (governance call on Rococo) - force_create_foreign_asset \ - "ws://127.0.0.1:9942" \ - "//Alice" \ - 1000 \ - "ws://127.0.0.1:9910" \ - "$(jq --null-input '{ "parents": 2, "interior": { "X1": { "GlobalConsensus": "Wococo" } } }')" \ - "$GLOBAL_CONSENSUS_WOCOCO_SOVEREIGN_ACCOUNT" \ - 10000000000 \ - true - # drip SA which holds reserves - transfer_balance \ - "ws://127.0.0.1:9910" \ - "//Alice" \ - "$GLOBAL_CONSENSUS_WOCOCO_ASSET_HUB_WOCOCO_1000_SOVEREIGN_ACCOUNT" \ - $((1000000000 + 50000000000 * 20)) - # HRMP - open_hrmp_channels \ - "ws://127.0.0.1:9942" \ - "//Alice" \ - 1000 1013 4 524288 - open_hrmp_channels \ - "ws://127.0.0.1:9942" \ - "//Alice" \ - 1013 1000 4 524288 - ;; - init-bridge-hub-rococo-local) - ensure_polkadot_js_api - # SA of sibling asset hub pays for the execution - transfer_balance \ - "ws://127.0.0.1:8943" \ - "//Alice" \ - "$ASSET_HUB_ROCOCO_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_ROCOCO" \ - $((1000000000 + 50000000000 * 20)) - # drip SA of lane dedicated to asset hub for paying rewards for delivery - transfer_balance \ - "ws://127.0.0.1:8943" \ - "//Alice" \ - "$ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhwo_ThisChain" \ - $((1000000000 + 2000000000000)) - # drip SA of lane dedicated to asset hub for paying rewards for delivery confirmation - transfer_balance \ - "ws://127.0.0.1:8943" \ - "//Alice" \ - "$ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhwo_BridgedChain" \ - $((1000000000 + 2000000000000)) - ;; - init-asset-hub-wococo-local) - ensure_polkadot_js_api - # set Wococo flavor - set_storage with: - # - `key` is `HexDisplay::from(&asset_hub_rococo_runtime::xcm_config::Flavor::key())` - # - `value` is `HexDisplay::from(&asset_hub_rococo_runtime::RuntimeFlavor::Wococo.encode())` - set_storage \ - "ws://127.0.0.1:9945" \ - "//Alice" \ - 1000 \ - "ws://127.0.0.1:9010" \ - "$(jq --null-input '[["0x48297505634037ef48c848c99c0b1f1b", "0x01"]]')" - # create foreign assets for native Rococo token (governance call on Wococo) - force_create_foreign_asset \ - "ws://127.0.0.1:9945" \ - "//Alice" \ - 1000 \ - "ws://127.0.0.1:9010" \ - "$(jq --null-input '{ "parents": 2, "interior": { "X1": { "GlobalConsensus": "Rococo" } } }')" \ - "$GLOBAL_CONSENSUS_ROCOCO_SOVEREIGN_ACCOUNT" \ - 10000000000 \ - true - # drip SA which holds reserves - transfer_balance \ - "ws://127.0.0.1:9010" \ - "//Alice" \ - "$GLOBAL_CONSENSUS_ROCOCO_ASSET_HUB_ROCOCO_1000_SOVEREIGN_ACCOUNT" \ - $((1000000000 + 50000000000 * 20)) - # HRMP - open_hrmp_channels \ - "ws://127.0.0.1:9945" \ - "//Alice" \ - 1000 1014 4 524288 - open_hrmp_channels \ - "ws://127.0.0.1:9945" \ - "//Alice" \ - 1014 1000 4 524288 - ;; - init-bridge-hub-wococo-local) - # set Wococo flavor - set_storage with: - # - `key` is `HexDisplay::from(&bridge_hub_rococo_runtime::xcm_config::Flavor::key())` - # - `value` is 
`HexDisplay::from(&bridge_hub_rococo_runtime::RuntimeFlavor::Wococo.encode())` - set_storage \ - "ws://127.0.0.1:9945" \ - "//Alice" \ - 1014 \ - "ws://127.0.0.1:8945" \ - "$(jq --null-input '[["0x48297505634037ef48c848c99c0b1f1b", "0x01"]]')" - # SA of sibling asset hub pays for the execution - transfer_balance \ - "ws://127.0.0.1:8945" \ - "//Alice" \ - "$ASSET_HUB_WOCOCO_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_WOCOCO" \ - $((1000000000 + 50000000000 * 20)) - # drip SA of lane dedicated to asset hub for paying rewards for delivery - transfer_balance \ - "ws://127.0.0.1:8945" \ - "//Alice" \ - "$ON_BRIDGE_HUB_WOCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhro_ThisChain" \ - $((1000000000 + 2000000000000)) - # drip SA of lane dedicated to asset hub for paying rewards for delivery confirmation - transfer_balance \ - "ws://127.0.0.1:8945" \ - "//Alice" \ - "$ON_BRIDGE_HUB_WOCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhro_BridgedChain" \ - $((1000000000 + 2000000000000)) - ;; - reserve-transfer-assets-from-asset-hub-rococo-local) - ensure_polkadot_js_api - # send ROCs to Alice account on AHW - limited_reserve_transfer_assets \ - "ws://127.0.0.1:9910" \ - "//Alice" \ - "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Wococo" }, { "Parachain": 1000 } ] } } }')" \ - "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ - "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": 200000000000 } } ] }')" \ - 0 \ - "Unlimited" - ;; - reserve-transfer-assets-from-asset-hub-wococo-local) - ensure_polkadot_js_api - # send WOCs to Alice account on AHR - limited_reserve_transfer_assets \ - "ws://127.0.0.1:9010" \ - "//Alice" \ - "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Rococo" }, { "Parachain": 1000 } ] } } }')" \ - "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ - "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": 150000000000 } } ] }')" \ - 0 \ - "Unlimited" - ;; - claim-rewards-bridge-hub-rococo-local) - ensure_polkadot_js_api - # bhwo -> [62, 68, 77, 6f] -> 0x6268776f - claim_rewards \ - "ws://127.0.0.1:8943" \ - "//Charlie" \ - "0x${LANE_ID}" \ - "0x6268776f" \ - "ThisChain" - claim_rewards \ - "ws://127.0.0.1:8943" \ - "//Charlie" \ - "0x${LANE_ID}" \ - "0x6268776f" \ - "BridgedChain" - ;; - claim-rewards-bridge-hub-wococo-local) - # bhro -> [62, 68, 72, 6f] -> 0x6268726f - claim_rewards \ - "ws://127.0.0.1:8945" \ - "//Charlie" \ - "0x${LANE_ID}" \ - "0x6268726f" \ - "ThisChain" - claim_rewards \ - "ws://127.0.0.1:8945" \ - "//Charlie" \ - "0x${LANE_ID}" \ - "0x6268726f" \ - "BridgedChain" - ;; - stop) - pkill -f polkadot - pkill -f parachain - ;; - import) - # to avoid trigger anything here - ;; - *) - echo "A command is require. 
Supported commands for: - Local (zombienet) run: - - run-relay - - init-asset-hub-rococo-local - - init-bridge-hub-rococo-local - - init-asset-hub-wococo-local - - init-bridge-hub-wococo-local - - reserve-transfer-assets-from-asset-hub-rococo-local - - reserve-transfer-assets-from-asset-hub-wococo-local - - claim-rewards-bridge-hub-rococo-local - - claim-rewards-bridge-hub-wococo-local"; - exit 1 - ;; -esac diff --git a/cumulus/zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml b/cumulus/zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml index a117942858e..99a7d0035b5 100644 --- a/cumulus/zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml +++ b/cumulus/zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml @@ -41,8 +41,7 @@ cumulus_based = true ws_port = 8943 args = [ "-lparachain=debug,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace", - "--force-authoring", - "--", "--rpc-port 48933" + "--force-authoring" ] # run bob as parachain collator @@ -54,8 +53,7 @@ cumulus_based = true ws_port = 8944 args = [ "-lparachain=trace,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace", - "--force-authoring", - "--", "--rpc-port 48934" + "--force-authoring" ] [[parachains]] @@ -69,16 +67,14 @@ cumulus_based = true ws_port = 9910 command = "{{POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_ROCOCO}}" args = [ - "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace", - "--", "--rpc-port 58933" + "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace" ] [[parachains.collators]] name = "asset-hub-rococo-collator2" command = "{{POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_ROCOCO}}" args = [ - "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace", - "--", "--rpc-port 58833" + "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace" ] #[[hrmp_channels]] diff --git a/cumulus/zombienet/bridge-hubs/bridge_hub_westend_local_network.toml b/cumulus/zombienet/bridge-hubs/bridge_hub_westend_local_network.toml index 4c345d3825c..1919d1c63f2 100644 --- a/cumulus/zombienet/bridge-hubs/bridge_hub_westend_local_network.toml +++ b/cumulus/zombienet/bridge-hubs/bridge_hub_westend_local_network.toml @@ -41,8 +41,7 @@ cumulus_based = true ws_port = 8945 args = [ "-lparachain=debug,runtime::mmr=info,substrate=info,runtime=info,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace", - "--force-authoring", - "--", "--rpc-port 48935" + "--force-authoring" ] # run bob as parachain collator @@ -54,8 +53,7 @@ cumulus_based = true ws_port = 8946 args = [ "-lparachain=trace,runtime::mmr=info,substrate=info,runtime=info,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace", - "--force-authoring", - "--", "--rpc-port 48936" + "--force-authoring" ] [[parachains]] @@ -69,16 +67,14 @@ cumulus_based = true ws_port = 9010 command = "{{POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_WESTEND}}" args = [ - "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace", - "--", "--rpc-port 38933" + "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace" ] [[parachains.collators]] name = "asset-hub-westend-collator2" command = "{{POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_WESTEND}}" args = [ - "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace", - "--", "--rpc-port 38833" 
+ "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace" ] #[[hrmp_channels]] diff --git a/cumulus/zombienet/bridge-hubs/bridge_hub_wococo_local_network.toml b/cumulus/zombienet/bridge-hubs/bridge_hub_wococo_local_network.toml deleted file mode 100644 index ae5cf641f66..00000000000 --- a/cumulus/zombienet/bridge-hubs/bridge_hub_wococo_local_network.toml +++ /dev/null @@ -1,94 +0,0 @@ -[settings] -node_spawn_timeout = 240 - -[relaychain] -default_command = "{{POLKADOT_BINARY_PATH}}" -default_args = [ "-lparachain=debug,xcm=trace" ] -chain = "wococo-local" - - [[relaychain.nodes]] - name = "alice-wococo-validator" - validator = true - rpc_port = 9935 - ws_port = 9945 - balance = 2000000000000 - - [[relaychain.nodes]] - name = "bob-wococo-validator" - validator = true - rpc_port = 9936 - ws_port = 9946 - balance = 2000000000000 - - [[relaychain.nodes]] - name = "charlie-wococo-validator" - validator = true - rpc_port = 9937 - ws_port = 9947 - balance = 2000000000000 - -[[parachains]] -id = 1014 -chain = "bridge-hub-wococo-local" -cumulus_based = true - - # run alice as parachain collator - [[parachains.collators]] - name = "bridge-hub-wococo-collator1" - validator = true - command = "{{POLKADOT_PARACHAIN_BINARY_PATH}}" - rpc_port = 8935 - ws_port = 8945 - args = [ - "-lparachain=debug,runtime::mmr=info,substrate=info,runtime=info,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace", - "--force-authoring", - "--", "--port 41335", "--rpc-port 48935" - ] - - # run bob as parachain collator - [[parachains.collators]] - name = "bridge-hub-wococo-collator2" - validator = true - command = "{{POLKADOT_PARACHAIN_BINARY_PATH}}" - rpc_port = 8936 - ws_port = 8946 - args = [ - "-lparachain=trace,runtime::mmr=info,substrate=info,runtime=info,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace", - "--force-authoring", - "--", "--port 41336", "--rpc-port 48936" - ] - -[[parachains]] -id = 1000 -chain = "asset-hub-wococo-local" -cumulus_based = true - - [[parachains.collators]] - name = "asset-hub-wococo-collator1" - rpc_port = 9011 - ws_port = 9010 - command = "{{POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_WOCOCO}}" - args = [ - "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace", - "--", "--port 31333", "--rpc-port 38933" - ] - - [[parachains.collators]] - name = "asset-hub-wococo-collator2" - command = "{{POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_WOCOCO}}" - args = [ - "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace", - "--", "--port 31433", "--rpc-port 38833" - ] - -#[[hrmp_channels]] -#sender = 1000 -#recipient = 1014 -#max_capacity = 4 -#max_message_size = 524288 -# -#[[hrmp_channels]] -#sender = 1014 -#recipient = 1000 -#max_capacity = 4 -#max_message_size = 524288 -- GitLab From d41d9f896a58136d8869bde701d55f367d2bc195 Mon Sep 17 00:00:00 2001 From: eskimor Date: Wed, 15 Nov 2023 17:31:01 +0100 Subject: [PATCH 44/74] Remove non-needed hook. 
--- polkadot/runtime/parachains/src/assigner_bulk/mod.rs | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/polkadot/runtime/parachains/src/assigner_bulk/mod.rs b/polkadot/runtime/parachains/src/assigner_bulk/mod.rs index 801762dd6e9..b4660900e79 100644 --- a/polkadot/runtime/parachains/src/assigner_bulk/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_bulk/mod.rs @@ -210,12 +210,7 @@ pub mod pallet { >; #[pallet::hooks] - impl Hooks> for Pallet { - fn on_initialize(_now: BlockNumberFor) -> Weight { - //TODO: Implement - T::DbWeight::get().reads_writes(0, 0) - } - } + impl Hooks> for Pallet {} #[pallet::call] impl Pallet {} -- GitLab From ea4085ab7448bb557a1558a25af164cf364e88d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 15 Nov 2023 23:54:08 +0100 Subject: [PATCH 45/74] frame-system: Add `last_runtime_upgrade_spec_version` (#2351) Adds a function for querying the last runtime upgrade spec version. This can be useful for when writing runtime level migrations to ensure that they are not executed multiple times. An example would be a session key migration. --------- Co-authored-by: Liam Aharon Co-authored-by: Oliver Tale-Yazdi --- Cargo.lock | 1 + substrate/frame/executive/src/lib.rs | 52 ++++++++++++++-------------- substrate/frame/system/Cargo.toml | 1 + substrate/frame/system/src/lib.rs | 19 ++++++++++ substrate/frame/system/src/tests.rs | 25 ++++++++++++- 5 files changed, 71 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0c374a90a18..7854042ddbc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5904,6 +5904,7 @@ version = "4.0.0-dev" dependencies = [ "cfg-if", "criterion 0.4.0", + "docify", "frame-support", "log", "parity-scale-codec", diff --git a/substrate/frame/executive/src/lib.rs b/substrate/frame/executive/src/lib.rs index e2c906c1bf6..dec1fe158bd 100644 --- a/substrate/frame/executive/src/lib.rs +++ b/substrate/frame/executive/src/lib.rs @@ -487,6 +487,12 @@ where let mut weight = Weight::zero(); if Self::runtime_upgraded() { weight = weight.saturating_add(Self::execute_on_runtime_upgrade()); + + frame_system::LastRuntimeUpgrade::::put( + frame_system::LastRuntimeUpgradeInfo::from( + >::get(), + ), + ); } >::initialize(block_number, parent_hash, digest); weight = weight.saturating_add(::note_finished_initialize(); } - /// Returns if the runtime was upgraded since the last time this function was called. + /// Returns if the runtime has been upgraded, based on [`frame_system::LastRuntimeUpgrade`]. 
fn runtime_upgraded() -> bool { let last = frame_system::LastRuntimeUpgrade::::get(); let current = >::get(); - if last.map(|v| v.was_upgraded(¤t)).unwrap_or(true) { - frame_system::LastRuntimeUpgrade::::put( - frame_system::LastRuntimeUpgradeInfo::from(current), - ); - true - } else { - false - } + last.map(|v| v.was_upgraded(¤t)).unwrap_or(true) } fn initial_checks(block: &Block) { @@ -755,7 +754,7 @@ mod tests { traits::{fungible, ConstU32, ConstU64, ConstU8, Currency}, weights::{ConstantMultiplier, IdentityFee, RuntimeDbWeight, Weight, WeightToFee}, }; - use frame_system::{ChainContext, LastRuntimeUpgradeInfo}; + use frame_system::{ChainContext, LastRuntimeUpgrade, LastRuntimeUpgradeInfo}; use pallet_balances::Call as BalancesCall; use pallet_transaction_payment::CurrencyAdapter; @@ -994,6 +993,9 @@ mod tests { sp_io::storage::set(TEST_KEY, "custom_upgrade".as_bytes()); sp_io::storage::set(CUSTOM_ON_RUNTIME_KEY, &true.encode()); System::deposit_event(frame_system::Event::CodeUpdated); + + assert_eq!(0, System::last_runtime_upgrade_spec_version()); + Weight::from_parts(100, 0) } } @@ -1356,17 +1358,13 @@ mod tests { new_test_ext(1).execute_with(|| { RuntimeVersionTestValues::mutate(|v| *v = Default::default()); // It should be added at genesis - assert!(frame_system::LastRuntimeUpgrade::::exists()); + assert!(LastRuntimeUpgrade::::exists()); assert!(!Executive::runtime_upgraded()); RuntimeVersionTestValues::mutate(|v| { *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } }); assert!(Executive::runtime_upgraded()); - assert_eq!( - Some(LastRuntimeUpgradeInfo { spec_version: 1.into(), spec_name: "".into() }), - frame_system::LastRuntimeUpgrade::::get(), - ); RuntimeVersionTestValues::mutate(|v| { *v = sp_version::RuntimeVersion { @@ -1376,27 +1374,18 @@ mod tests { } }); assert!(Executive::runtime_upgraded()); - assert_eq!( - Some(LastRuntimeUpgradeInfo { spec_version: 1.into(), spec_name: "test".into() }), - frame_system::LastRuntimeUpgrade::::get(), - ); RuntimeVersionTestValues::mutate(|v| { *v = sp_version::RuntimeVersion { - spec_version: 1, - spec_name: "test".into(), + spec_version: 0, impl_version: 2, ..Default::default() } }); assert!(!Executive::runtime_upgraded()); - frame_system::LastRuntimeUpgrade::::take(); + LastRuntimeUpgrade::::take(); assert!(Executive::runtime_upgraded()); - assert_eq!( - Some(LastRuntimeUpgradeInfo { spec_version: 1.into(), spec_name: "test".into() }), - frame_system::LastRuntimeUpgrade::::get(), - ); }) } @@ -1444,6 +1433,10 @@ mod tests { assert_eq!(&sp_io::storage::get(TEST_KEY).unwrap()[..], *b"module"); assert_eq!(sp_io::storage::get(CUSTOM_ON_RUNTIME_KEY).unwrap(), true.encode()); + assert_eq!( + Some(RuntimeVersionTestValues::get().into()), + LastRuntimeUpgrade::::get(), + ) }); } @@ -1519,6 +1512,9 @@ mod tests { #[test] fn all_weights_are_recorded_correctly() { + // Reset to get the correct new genesis below. + RuntimeVersionTestValues::take(); + new_test_ext(1).execute_with(|| { // Make sure `on_runtime_upgrade` is called for maximum complexity RuntimeVersionTestValues::mutate(|v| { @@ -1535,6 +1531,10 @@ mod tests { Digest::default(), )); + // Reset the last runtime upgrade info, to make the second call to `on_runtime_upgrade` + // succeed. 
+ LastRuntimeUpgrade::::take(); + // All weights that show up in the `initialize_block_impl` let custom_runtime_upgrade_weight = CustomOnRuntimeUpgrade::on_runtime_upgrade(); let runtime_upgrade_weight = diff --git a/substrate/frame/system/Cargo.toml b/substrate/frame/system/Cargo.toml index f7733e312c3..b61b4d531e2 100644 --- a/substrate/frame/system/Cargo.toml +++ b/substrate/frame/system/Cargo.toml @@ -25,6 +25,7 @@ sp-runtime = { path = "../../primitives/runtime", default-features = false, feat sp-std = { path = "../../primitives/std", default-features = false} sp-version = { path = "../../primitives/version", default-features = false, features = ["serde"] } sp-weights = { path = "../../primitives/weights", default-features = false, features = ["serde"] } +docify = "0.2.0" [dev-dependencies] criterion = "0.4.0" diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index 0e394a11041..1b8dd6a9367 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -1094,6 +1094,25 @@ pub enum DecRefStatus { } impl Pallet { + /// Returns the `spec_version` of the last runtime upgrade. + /// + /// This function is useful for writing guarded runtime migrations in the runtime. A runtime + /// migration can use the `spec_version` to ensure that it isn't applied twice. This works + /// similar as the storage version for pallets. + /// + /// This functions returns the `spec_version` of the last runtime upgrade while executing the + /// runtime migrations + /// [`on_runtime_upgrade`](frame_support::traits::OnRuntimeUpgrade::on_runtime_upgrade) + /// function. After all migrations are executed, this will return the `spec_version` of the + /// current runtime until there is another runtime upgrade. + /// + /// Example: + #[doc = docify::embed!("src/tests.rs", last_runtime_upgrade_spec_version_usage)] + pub fn last_runtime_upgrade_spec_version() -> u32 { + LastRuntimeUpgrade::::get().map_or(0, |l| l.spec_version.0) + } + + /// Returns true if the given account exists. pub fn account_exists(who: &T::AccountId) -> bool { Account::::contains_key(who) } diff --git a/substrate/frame/system/src/tests.rs b/substrate/frame/system/src/tests.rs index 165df688b1c..6fbddaaf229 100644 --- a/substrate/frame/system/src/tests.rs +++ b/substrate/frame/system/src/tests.rs @@ -19,7 +19,7 @@ use crate::*; use frame_support::{ assert_noop, assert_ok, dispatch::{Pays, PostDispatchInfo, WithPostDispatchInfo}, - traits::WhitelistedStorageKeys, + traits::{OnRuntimeUpgrade, WhitelistedStorageKeys}, }; use std::collections::BTreeSet; @@ -773,3 +773,26 @@ pub fn from_actual_ref_time(ref_time: Option) -> PostDispatchInfo { pub fn from_post_weight_info(ref_time: Option, pays_fee: Pays) -> PostDispatchInfo { PostDispatchInfo { actual_weight: ref_time.map(|t| Weight::from_all(t)), pays_fee } } + +#[docify::export] +#[test] +fn last_runtime_upgrade_spec_version_usage() { + struct Migration; + + impl OnRuntimeUpgrade for Migration { + fn on_runtime_upgrade() -> Weight { + // Ensure to compare the spec version against some static version to prevent applying + // the same migration multiple times. + // + // `1337` here is the spec version of the runtime running on chain. If there is maybe + // a runtime upgrade in the pipeline of being applied, you should use the spec version + // of this upgrade. + if System::last_runtime_upgrade_spec_version() > 1337 { + return Weight::zero(); + } + + // Do the migration. 
+ Weight::zero() + } + } +} -- GitLab From e07671e3761b7a37b796c0b56aae6d6214338a43 Mon Sep 17 00:00:00 2001 From: Bradley Olson <34992650+BradleyOlson64@users.noreply.github.com> Date: Thu, 16 Nov 2023 01:34:26 -0800 Subject: [PATCH 46/74] Added new checks to add_assignment (#2332) We now check for every requirement of `add_assignment` described in [RFC 5](https://github.com/polkadot-fellows/RFCs/blob/gav-corejam/text/0005-coretime-interface.md). These are: - Not more than 100 assignments in a schedule - Assignment parts add up to 57600 (neither overscheduled nor underscheduled) - Starting blocks must be greater than or equal to current block + 10 when schedules are added - Assignments within an added set must be unique - Assignments within an added set must be sorted --- .../parachains/src/assigner_bulk/mod.rs | 41 ++++-- .../parachains/src/assigner_bulk/tests.rs | 121 +++++++++++++----- 2 files changed, 120 insertions(+), 42 deletions(-) diff --git a/polkadot/runtime/parachains/src/assigner_bulk/mod.rs b/polkadot/runtime/parachains/src/assigner_bulk/mod.rs index 801762dd6e9..8f021791ba0 100644 --- a/polkadot/runtime/parachains/src/assigner_bulk/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_bulk/mod.rs @@ -223,13 +223,20 @@ pub mod pallet { #[pallet::error] pub enum Error { AssignmentsEmpty, + TooManyAssignments, /// Assignments together exceeded 57600. OverScheduled, + /// Assignments together less than 57600 + UnderScheduled, /// assign_core is only allowed to append new assignments at the end of already existing /// ones. DisallowedInsert, /// Tried to insert a schedule for the same core and block number as an existing schedule DuplicateInsert, + /// Tried to add an unsorted set of assignments + AssignmentsNotSorted, + /// Two or more of the same assignment contained in assignment set + AssignmentsNotUnique, } } @@ -411,18 +418,32 @@ impl Pallet { assignments: Vec<(CoreAssignment, PartsOf57600)>, end_hint: Option>, ) -> Result<(), DispatchError> { - // There should be at least one assignment + // TODO: Add this assert once the calls `request_core_count` and `notify_core_count` + // have been established. assert!(core < core_count); + + // There should be at least one assignment and at most 100 ensure!(assignments.len() > 0usize, Error::::AssignmentsEmpty); + ensure!(assignments.len() <= 100usize, Error::::TooManyAssignments); + + // Checking for sort and unique manually, since we don't have access to iterator tools. + // This way of checking uniqueness only works since we also check sortedness. 
+ let assignment_targets = assignments.iter().map(|x| &x.0); + let mut maybe_prior = None; + for target in assignment_targets { + if let Some(prior) = maybe_prior { + ensure!(target != prior, Error::::AssignmentsNotUnique); + ensure!(target > prior, Error::::AssignmentsNotSorted); + } + maybe_prior = Some(target); + } - // Check that the total parts between all assignments do not exceed 57600 - ensure!( - assignments - .iter() - .map(|assignment| assignment.1) - .fold(PartsOf57600::from(0u16), |sum, parts| sum.saturating_add(parts)) <= - PartsOf57600::from(57600u16), - Error::::OverScheduled - ); + // Check that the total parts between all assignments are equal to 57600 + let parts_sum = assignments + .iter() + .map(|assignment| assignment.1) + .fold(PartsOf57600::from(0u16), |sum, parts| sum.saturating_add(parts)); + ensure!(parts_sum <= PartsOf57600::from(57600u16), Error::::OverScheduled); + ensure!(parts_sum >= PartsOf57600::from(57600u16), Error::::UnderScheduled); CoreDescriptors::::mutate(core_idx, |core_descriptor| { let new_queue = match core_descriptor.queue { diff --git a/polkadot/runtime/parachains/src/assigner_bulk/tests.rs b/polkadot/runtime/parachains/src/assigner_bulk/tests.rs index 0d170658824..be42dad3f63 100644 --- a/polkadot/runtime/parachains/src/assigner_bulk/tests.rs +++ b/polkadot/runtime/parachains/src/assigner_bulk/tests.rs @@ -93,7 +93,7 @@ fn assign_core_works_with_no_prior_schedule() { let core_idx = CoreIndex(0); new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { - run_to_block(10, |n| if n == 10 { Some(Default::default()) } else { None }); + run_to_block(1, |n| if n == 1 { Some(Default::default()) } else { None }); // Call assign_core assert_ok!(BulkAssigner::assign_core( @@ -130,7 +130,7 @@ fn assign_core_works_with_prior_schedule() { let core_idx = CoreIndex(0); new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { - run_to_block(10, |n| if n == 10 { Some(Default::default()) } else { None }); + run_to_block(1, |n| if n == 1 { Some(Default::default()) } else { None }); let default_with_next_schedule = Schedule { next_schedule: Some(15u32), ..default_test_schedule() }; @@ -182,12 +182,12 @@ fn assign_core_enforces_higher_block_number() { let core_idx = CoreIndex(0); new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { - run_to_block(10, |n| if n == 10 { Some(Default::default()) } else { None }); + run_to_block(1, |n| if n == 1 { Some(Default::default()) } else { None }); // Call assign core twice to establish some schedules assert_ok!(BulkAssigner::assign_core( core_idx, - BlockNumberFor::::from(11u32), + BlockNumberFor::::from(12u32), default_test_assignments(), None, )); @@ -203,7 +203,7 @@ fn assign_core_enforces_higher_block_number() { assert_noop!( BulkAssigner::assign_core( core_idx, - BlockNumberFor::::from(10u32), + BlockNumberFor::::from(11u32), default_test_assignments(), None, ), @@ -230,20 +230,32 @@ fn assign_core_enforces_well_formed_schedule() { let core_idx = CoreIndex(0); new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { - run_to_block(10, |n| if n == 10 { Some(Default::default()) } else { None }); + run_to_block(1, |n| if n == 1 { Some(Default::default()) } else { None }); - let bad_assignment_count: Vec<(CoreAssignment, PartsOf57600)> = vec![]; - let bad_parts_sum = vec![ - (CoreAssignment::Task(para_id.into()), PartsOf57600::from(57600u16)), + let empty_assignments: Vec<(CoreAssignment, PartsOf57600)> = vec![]; + let too_many_assignments = 
vec![(CoreAssignment::Pool, PartsOf57600::from(288u16)); 200]; + let overscheduled = vec![ (CoreAssignment::Pool, PartsOf57600::from(57600u16)), + (CoreAssignment::Task(para_id.into()), PartsOf57600::from(57600u16)), + ]; + let underscheduled = vec![(CoreAssignment::Pool, PartsOf57600::from(30000u16))]; + let not_unique = vec![ + (CoreAssignment::Pool, PartsOf57600::from(57600u16 / 2)), + (CoreAssignment::Pool, PartsOf57600::from(57600u16 / 2)), + ]; + let not_sorted = vec![ + (CoreAssignment::Task(para_id.into()), PartsOf57600::from(19200u16)), + (CoreAssignment::Pool, PartsOf57600::from(19200u16)), + (CoreAssignment::Idle, PartsOf57600::from(19200u16)), ]; - // Attempting to assign_core with bad assignments + // Attempting assign_core with malformed assignments such that all error cases + // are tested assert_noop!( BulkAssigner::assign_core( core_idx, BlockNumberFor::::from(11u32), - bad_assignment_count, + empty_assignments, None, ), Error::::AssignmentsEmpty @@ -252,11 +264,47 @@ fn assign_core_enforces_well_formed_schedule() { BulkAssigner::assign_core( core_idx, BlockNumberFor::::from(11u32), - bad_parts_sum, + too_many_assignments, + None, + ), + Error::::TooManyAssignments + ); + assert_noop!( + BulkAssigner::assign_core( + core_idx, + BlockNumberFor::::from(11u32), + overscheduled, None, ), Error::::OverScheduled ); + assert_noop!( + BulkAssigner::assign_core( + core_idx, + BlockNumberFor::::from(11u32), + underscheduled, + None, + ), + Error::::UnderScheduled + ); + assert_noop!( + BulkAssigner::assign_core( + core_idx, + BlockNumberFor::::from(11u32), + not_unique, + None, + ), + Error::::AssignmentsNotUnique + ); + assert_noop!( + BulkAssigner::assign_core( + core_idx, + BlockNumberFor::::from(11u32), + not_sorted, + None, + ), + Error::::AssignmentsNotSorted + ); }); } @@ -265,12 +313,12 @@ fn next_schedule_always_points_to_next_work_plan_item() { let core_idx = CoreIndex(0); new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { - run_to_block(10, |n| if n == 10 { Some(Default::default()) } else { None }); - let start_1 = 10u32; - let start_2 = 15u32; - let start_3 = 20u32; - let start_4 = 25u32; - let start_5 = 30u32; + run_to_block(1, |n| if n == 1 { Some(Default::default()) } else { None }); + let start_1 = 15u32; + let start_2 = 20u32; + let start_3 = 25u32; + let start_4 = 30u32; + let start_5 = 35u32; let expected_schedule_3 = Schedule { next_schedule: Some(start_4), ..default_test_schedule() }; @@ -315,8 +363,9 @@ fn next_schedule_always_points_to_next_work_plan_item() { )); // Rotate through the first two schedules + run_to_block(start_1, |n| if n == start_1 { Some(Default::default()) } else { None }); BulkAssigner::pop_assignment_for_core(core_idx); - run_to_block(15, |n| if n == 15 { Some(Default::default()) } else { None }); + run_to_block(start_2, |n| if n == start_2 { Some(Default::default()) } else { None }); BulkAssigner::pop_assignment_for_core(core_idx); // Use saved starting block numbers to check that schedules chain @@ -379,7 +428,7 @@ fn ensure_workload_works() { new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { let mut core_descriptor: CoreDescriptor> = CoreDescriptor { queue: None, current_work: None }; - run_to_block(10, |n| if n == 10 { Some(Default::default()) } else { None }); + run_to_block(1, |n| if n == 1 { Some(Default::default()) } else { None }); // Case 1: No new schedule in CoreSchedules for core BulkAssigner::ensure_workload(10u32, core_idx, &mut core_descriptor); @@ -431,7 +480,7 @@ fn 
pop_assignment_for_core_works() { // on demand order to later pop with our bulk assigner. schedule_blank_para(para_id, ParaKind::Parathread); Balances::make_free_balance_be(&alice, amt); - run_to_block(10, |n| if n == 10 { Some(Default::default()) } else { None }); + run_to_block(1, |n| if n == 1 { Some(Default::default()) } else { None }); assert_ok!(OnDemandAssigner::place_order_allow_death( RuntimeOrigin::signed(alice), amt, @@ -441,22 +490,24 @@ fn pop_assignment_for_core_works() { // Case 1: Assignment idle assert_ok!(BulkAssigner::assign_core( core_idx, - BlockNumberFor::::from(10u32), + BlockNumberFor::::from(11u32), default_test_assignments(), // Default is Idle None, )); + run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); + assert_eq!(BulkAssigner::pop_assignment_for_core(core_idx), None); // Case 2: Assignment pool assert_ok!(BulkAssigner::assign_core( core_idx, - BlockNumberFor::::from(11u32), + BlockNumberFor::::from(21u32), assignments_pool, None, )); - run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); + run_to_block(21, |n| if n == 21 { Some(Default::default()) } else { None }); assert_eq!( BulkAssigner::pop_assignment_for_core(core_idx), @@ -466,12 +517,12 @@ fn pop_assignment_for_core_works() { // Case 3: Assignment task assert_ok!(BulkAssigner::assign_core( core_idx, - BlockNumberFor::::from(12u32), + BlockNumberFor::::from(31u32), assignments_task, None, )); - run_to_block(12, |n| if n == 12 { Some(Default::default()) } else { None }); + run_to_block(31, |n| if n == 31 { Some(Default::default()) } else { None }); assert_eq!( BulkAssigner::pop_assignment_for_core(core_idx), @@ -487,7 +538,7 @@ fn assignment_proportions_in_core_state_work() { let task_2 = TaskId::from(2u32); new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { - run_to_block(10, |n| if n == 10 { Some(Default::default()) } else { None }); + run_to_block(1, |n| if n == 1 { Some(Default::default()) } else { None }); // Task 1 gets 2/3 core usage, while task 2 gets 1/3 let test_assignments = vec![ @@ -497,11 +548,13 @@ fn assignment_proportions_in_core_state_work() { assert_ok!(BulkAssigner::assign_core( core_idx, - BlockNumberFor::::from(10u32), + BlockNumberFor::::from(11u32), test_assignments, None, )); + run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); + // Case 1: Current assignment remaining >= step after pop { assert_eq!( @@ -566,7 +619,7 @@ fn equal_assignments_served_equally() { let task_2 = TaskId::from(2u32); new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { - run_to_block(10, |n| if n == 10 { Some(Default::default()) } else { None }); + run_to_block(1, |n| if n == 1 { Some(Default::default()) } else { None }); // Tasks 1 and 2 get equal work parts let test_assignments = vec![ @@ -576,11 +629,13 @@ fn equal_assignments_served_equally() { assert_ok!(BulkAssigner::assign_core( core_idx, - BlockNumberFor::::from(10u32), + BlockNumberFor::::from(11u32), test_assignments, None, )); + run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); + // Test that popped assignments alternate between tasks 1 and 2 assert_eq!( BulkAssigner::pop_assignment_for_core(core_idx), @@ -625,7 +680,7 @@ fn assignment_proportions_indivisible_by_step_work() { let task_2 = TaskId::from(2u32); new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { - run_to_block(10, |n| if n == 10 { Some(Default::default()) } else { None }); + run_to_block(1, |n| if n == 1 { 
Some(Default::default()) } else { None }); // Task 1 gets 3/5 core usage, while task 2 gets 2/5. That way // step is set to 2/5 and task 1 is indivisible by step. @@ -634,11 +689,13 @@ fn assignment_proportions_indivisible_by_step_work() { assert_ok!(BulkAssigner::assign_core( core_idx, - BlockNumberFor::::from(10u32), + BlockNumberFor::::from(11u32), test_assignments, None, )); + run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); + // Pop 5 assignments. Should Result in the the following work ordering: // 1, 2, 1, 1, 2. The remaining parts for each assignment should be same // at the end as in the beginning. -- GitLab From d4c426afd46f43b81115911657ccc0002a361ddb Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Thu, 16 Nov 2023 12:59:06 +0100 Subject: [PATCH 47/74] [ci] Enable zombienet jobs in PRs (#2361) Since preparation for the merge queues needs more time I'm enabling zombienet jobs in PRs CI back. --- .gitlab/pipeline/publish.yml | 2 +- .gitlab/pipeline/zombienet/cumulus.yml | 2 +- .gitlab/pipeline/zombienet/polkadot.yml | 8 ++++---- .gitlab/pipeline/zombienet/substrate.yml | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.gitlab/pipeline/publish.yml b/.gitlab/pipeline/publish.yml index f2308c334e0..3cc2002cc1c 100644 --- a/.gitlab/pipeline/publish.yml +++ b/.gitlab/pipeline/publish.yml @@ -72,7 +72,7 @@ publish-rustdoc: IMAGE_NAME: "" # docker.io/paritypr/image_name script: # Exit if the job is not running in a merge queue - - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi + # - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - $BUILDAH_COMMAND build --format=docker --build-arg VCS_REF="${CI_COMMIT_SHA}" diff --git a/.gitlab/pipeline/zombienet/cumulus.yml b/.gitlab/pipeline/zombienet/cumulus.yml index c8a1df004e3..3cac67c2966 100644 --- a/.gitlab/pipeline/zombienet/cumulus.yml +++ b/.gitlab/pipeline/zombienet/cumulus.yml @@ -4,7 +4,7 @@ .zombienet-before-script: before_script: # Exit if the job is not merge queue - - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi + # - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - echo "Zombie-net Tests Config" - echo "${ZOMBIENET_IMAGE}" - echo "${POLKADOT_IMAGE}" diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml index cc960557298..995dd982532 100644 --- a/.gitlab/pipeline/zombienet/polkadot.yml +++ b/.gitlab/pipeline/zombienet/polkadot.yml @@ -5,7 +5,7 @@ .zombienet-polkadot-common: before_script: # Exit if the job is not merge queue - - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi + # - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - export BUILD_RELEASE_VERSION="$(cat ./artifacts/BUILD_RELEASE_VERSION)" # from build-linux-stable job - export DEBUG=zombie,zombie::network-node - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG} @@ -120,7 +120,7 @@ zombienet-polkadot-smoke-0001-parachains-smoke-test: - .zombienet-polkadot-common before_script: # Exit if the job is not merge queue - - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 
0; fi + # - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG} - export COL_IMAGE="${COLANDER_IMAGE}":${PIPELINE_IMAGE_TAG} - echo "Zombienet Tests Config" @@ -139,7 +139,7 @@ zombienet-polkadot-smoke-0002-parachains-parachains-upgrade-smoke: - .zombienet-polkadot-common before_script: # Exit if the job is not merge queue - - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi + # - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG} - export CUMULUS_IMAGE="docker.io/paritypr/polkadot-parachain-debug:${DOCKER_IMAGES_VERSION}" - echo "Zombienet Tests Config" @@ -183,7 +183,7 @@ zombienet-polkadot-misc-0002-upgrade-node: artifacts: true before_script: # Exit if the job is not merge queue - - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi + # - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - export ZOMBIENET_INTEGRATION_TEST_IMAGE="docker.io/parity/polkadot:latest" - echo "Overrided poladot image ${ZOMBIENET_INTEGRATION_TEST_IMAGE}" - export COL_IMAGE="${COLANDER_IMAGE}":${PIPELINE_IMAGE_TAG} diff --git a/.gitlab/pipeline/zombienet/substrate.yml b/.gitlab/pipeline/zombienet/substrate.yml index e627575a31a..6334e7db9a3 100644 --- a/.gitlab/pipeline/zombienet/substrate.yml +++ b/.gitlab/pipeline/zombienet/substrate.yml @@ -5,7 +5,7 @@ .zombienet-substrate-common: before_script: # Exit if the job is not merge queue - - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi + # - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - echo "Zombienet Tests Config" - echo "${ZOMBIENET_IMAGE}" - echo "${GH_DIR}" -- GitLab From 02e8061bbadf51bce2f8c419c8645555532c7489 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Thu, 16 Nov 2023 17:36:37 +0200 Subject: [PATCH 48/74] westend: remove SessionKeys migration already applied on-chain (#2363) Westend now successfully updated to `spec: 103000`, we **have to remove** the session keys migration before the next release as it doesn't gracefully handle reapplying it. --- polkadot/runtime/westend/src/lib.rs | 48 ----------------------------- 1 file changed, 48 deletions(-) diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 2e8394b0ee4..4d537832ae3 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -425,17 +425,6 @@ parameter_types! { pub const Offset: BlockNumber = 0; } -impl_opaque_keys! { - pub struct OldSessionKeys { - pub grandpa: Grandpa, - pub babe: Babe, - pub im_online: ImOnline, - pub para_validator: Initializer, - pub para_assignment: ParaSessionInfo, - pub authority_discovery: AuthorityDiscovery, - } -} - impl_opaque_keys! { pub struct SessionKeys { pub grandpa: Grandpa, @@ -448,32 +437,6 @@ impl_opaque_keys! 
{ } } -// remove this when removing `OldSessionKeys` -fn transform_session_keys(v: AccountId, old: OldSessionKeys) -> SessionKeys { - SessionKeys { - grandpa: old.grandpa, - babe: old.babe, - im_online: old.im_online, - para_validator: old.para_validator, - para_assignment: old.para_assignment, - authority_discovery: old.authority_discovery, - beefy: { - // From Session::upgrade_keys(): - // - // Care should be taken that the raw versions of the - // added keys are unique for every `ValidatorId, KeyTypeId` combination. - // This is an invariant that the session pallet typically maintains internally. - // - // So, produce a dummy value that's unique for the `ValidatorId, KeyTypeId` combination. - let mut id: BeefyId = sp_application_crypto::ecdsa::Public::from_raw([0u8; 33]).into(); - let id_raw: &mut [u8] = id.as_mut(); - id_raw[1..33].copy_from_slice(v.as_ref()); - id_raw[0..4].copy_from_slice(b"beef"); - id - }, - } -} - impl pallet_session::Config for Runtime { type RuntimeEvent = RuntimeEvent; type ValidatorId = AccountId; @@ -1560,16 +1523,6 @@ pub type Migrations = migrations::Unreleased; pub mod migrations { use super::*; - /// Upgrade Session keys to include BEEFY key. - /// When this is removed, should also remove `OldSessionKeys`. - pub struct UpgradeSessionKeys; - impl frame_support::traits::OnRuntimeUpgrade for UpgradeSessionKeys { - fn on_runtime_upgrade() -> Weight { - Session::upgrade_keys::(transform_session_keys); - Perbill::from_percent(50) * BlockWeights::get().max_block - } - } - /// Unreleased migrations. Add new ones here: pub type Unreleased = ( pallet_im_online::migration::v1::Migration, @@ -1578,7 +1531,6 @@ pub mod migrations { assigned_slots::migration::v1::MigrateToV1, parachains_scheduler::migration::v1::MigrateToV1, parachains_configuration::migration::v8::MigrateToV8, - UpgradeSessionKeys, parachains_configuration::migration::v9::MigrateToV9, paras_registrar::migration::MigrateToV1, pallet_nomination_pools::migration::versioned_migrations::V5toV6, -- GitLab From 5e98803f032a7c0ac2d8fae6b0cfc76417917e2a Mon Sep 17 00:00:00 2001 From: ordian Date: Thu, 16 Nov 2023 21:13:04 +0100 Subject: [PATCH 49/74] implementers-guide: update github link (#2368) cc @joepetrowski --- polkadot/roadmap/implementers-guide/book.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/roadmap/implementers-guide/book.toml b/polkadot/roadmap/implementers-guide/book.toml index 1e6680f6f4b..f677c0d59c0 100644 --- a/polkadot/roadmap/implementers-guide/book.toml +++ b/polkadot/roadmap/implementers-guide/book.toml @@ -17,6 +17,6 @@ renderer = ["html"] additional-css = ["last-changed.css"] additional-js = ["mermaid.min.js", "mermaid-init.js"] # Repository URL used in the last-changed link. 
-git-repository-url = "https://github.com/paritytech/polkadot" +git-repository-url = "https://github.com/paritytech/polkadot-sdk" [output.linkcheck] -- GitLab From 4ac2db8095e55a68f9b3d843d4e4689759fcc9d5 Mon Sep 17 00:00:00 2001 From: joe petrowski <25483142+joepetrowski@users.noreply.github.com> Date: Fri, 17 Nov 2023 00:02:56 +0100 Subject: [PATCH 50/74] Fix Typo: `PalletXcmExtrinsicsBenchmark` (#2354) Missed in https://github.com/paritytech/polkadot-sdk/pull/1672 --- .../parachains/runtimes/assets/asset-hub-kusama/src/lib.rs | 6 +++--- .../runtimes/assets/asset-hub-polkadot/src/lib.rs | 6 +++--- .../parachains/runtimes/assets/asset-hub-rococo/src/lib.rs | 6 +++--- .../parachains/runtimes/assets/asset-hub-westend/src/lib.rs | 6 +++--- .../runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs | 6 +++--- .../runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs | 6 +++--- .../runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs | 6 +++--- .../runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs | 6 +++--- .../runtimes/collectives/collectives-polkadot/src/lib.rs | 6 +++--- .../runtimes/contracts/contracts-rococo/src/lib.rs | 6 +++--- polkadot/runtime/rococo/src/lib.rs | 6 +++--- polkadot/runtime/westend/src/lib.rs | 6 +++--- 12 files changed, 36 insertions(+), 36 deletions(-) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs index e4ed77884bf..af0116d7014 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs @@ -962,7 +962,7 @@ mod benches { [cumulus_pallet_xcmp_queue, XcmpQueue] [cumulus_pallet_dmp_queue, DmpQueue] // XCM - [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -1200,7 +1200,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -1244,7 +1244,7 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { fn reachable_dest() -> Option { Some(Parent.into()) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs index 6f853b6f56e..1b7ef10f485 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs @@ -868,7 +868,7 @@ mod benches { [cumulus_pallet_xcmp_queue, XcmpQueue] [cumulus_pallet_dmp_queue, DmpQueue] // XCM - [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. 
[pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -1082,7 +1082,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -1125,7 +1125,7 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { fn reachable_dest() -> Option { Some(Parent.into()) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 4492971566b..4b4ae61a3e8 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -996,7 +996,7 @@ mod benches { [cumulus_pallet_xcmp_queue, XcmpQueue] [pallet_xcm_bridge_hub_router, ToWestend] // XCM - [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -1234,7 +1234,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use pallet_xcm_bridge_hub_router::benchmarking::Pallet as XcmBridgeHubRouterBench; // This is defined once again in dispatch_benchmark, because list_benchmarks! @@ -1286,7 +1286,7 @@ impl_runtime_apis! { Config as XcmBridgeHubRouterConfig, }; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { fn reachable_dest() -> Option { Some(Parent.into()) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index cd17b9d86f7..d52edfe479c 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -1085,7 +1085,7 @@ mod benches { [cumulus_pallet_dmp_queue, DmpQueue] [pallet_xcm_bridge_hub_router, ToRococo] // XCM - [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -1369,7 +1369,7 @@ impl_runtime_apis! 
{ use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use pallet_xcm_bridge_hub_router::benchmarking::Pallet as XcmBridgeHubRouterBench; // This is defined once again in dispatch_benchmark, because list_benchmarks! @@ -1416,7 +1416,7 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { fn reachable_dest() -> Option { Some(Parent.into()) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs index b3750700084..d2db0340790 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs @@ -490,7 +490,7 @@ mod benches { [cumulus_pallet_xcmp_queue, XcmpQueue] [cumulus_pallet_dmp_queue, DmpQueue] // XCM - [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -670,7 +670,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -706,7 +706,7 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { fn reachable_dest() -> Option { Some(Parent.into()) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs index 841bb4ee861..02f05a8bb87 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs @@ -491,7 +491,7 @@ mod benches { [cumulus_pallet_xcmp_queue, XcmpQueue] [cumulus_pallet_dmp_queue, DmpQueue] // XCM - [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -671,7 +671,7 @@ impl_runtime_apis! 
{ use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -707,7 +707,7 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { fn reachable_dest() -> Option { Some(Parent.into()) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index b8fc2fffc88..5a44ccbb75a 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -554,7 +554,7 @@ mod benches { [cumulus_pallet_xcmp_queue, XcmpQueue] [cumulus_pallet_dmp_queue, DmpQueue] // XCM - [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -784,7 +784,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -826,7 +826,7 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { fn reachable_dest() -> Option { Some(Parent.into()) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 9e8fd84e712..d1d2b4a4159 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -544,7 +544,7 @@ mod benches { [pallet_collator_selection, CollatorSelection] [cumulus_pallet_xcmp_queue, XcmpQueue] // XCM - [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -772,7 +772,7 @@ impl_runtime_apis! 
{ use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -814,7 +814,7 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { fn reachable_dest() -> Option { Some(Parent.into()) diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs index 206f4614060..c3d671c9085 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs @@ -751,7 +751,7 @@ mod benches { [cumulus_pallet_dmp_queue, DmpQueue] [pallet_alliance, Alliance] [pallet_collective, AllianceMotion] - [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] [pallet_preimage, Preimage] [pallet_scheduler, Scheduler] [pallet_referenda, FellowshipReferenda] @@ -939,7 +939,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; let mut list = Vec::::new(); list_benchmarks!(list, extra); @@ -969,7 +969,7 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { fn reachable_dest() -> Option { Some(Parent.into()) diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index 4c66e780ba9..5b828bad0c7 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -433,7 +433,7 @@ mod benches { [pallet_timestamp, Timestamp] [pallet_collator_selection, CollatorSelection] [pallet_contracts, Contracts] - [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] ); } @@ -678,7 +678,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; let mut list = Vec::::new(); list_benchmarks!(list, extra); @@ -709,7 +709,7 @@ impl_runtime_apis! 
{ impl cumulus_pallet_session_benchmarking::Config for Runtime {} use xcm::latest::prelude::*; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { fn reachable_dest() -> Option { Some(Parent.into()) diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 277c9981dab..675e0a20b2b 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -1617,7 +1617,7 @@ mod benches { [pallet_asset_rate, AssetRate] [pallet_whitelist, Whitelist] // XCM - [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] [pallet_xcm_benchmarks::fungible, pallet_xcm_benchmarks::fungible::Pallet::] [pallet_xcm_benchmarks::generic, pallet_xcm_benchmarks::generic::Pallet::] ); @@ -2096,7 +2096,7 @@ sp_api::impl_runtime_apis! { use frame_system_benchmarking::Pallet as SystemBench; use frame_benchmarking::baseline::Pallet as Baseline; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; let mut list = Vec::::new(); list_benchmarks!(list, extra); @@ -2115,7 +2115,7 @@ sp_api::impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; use frame_system_benchmarking::Pallet as SystemBench; use frame_benchmarking::baseline::Pallet as Baseline; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use sp_storage::TrackedStorageKey; use xcm::latest::prelude::*; use xcm_config::{ diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 4d537832ae3..16a0b112a0b 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1606,7 +1606,7 @@ mod benches { [pallet_whitelist, Whitelist] [pallet_asset_rate, AssetRate] // XCM - [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -2129,7 +2129,7 @@ sp_api::impl_runtime_apis! { use pallet_session_benchmarking::Pallet as SessionBench; use pallet_offences_benchmarking::Pallet as OffencesBench; use pallet_election_provider_support_benchmarking::Pallet as ElectionProviderBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use frame_system_benchmarking::Pallet as SystemBench; use pallet_nomination_pools_benchmarking::Pallet as NominationPoolsBench; @@ -2157,7 +2157,7 @@ sp_api::impl_runtime_apis! 
{ use pallet_session_benchmarking::Pallet as SessionBench; use pallet_offences_benchmarking::Pallet as OffencesBench; use pallet_election_provider_support_benchmarking::Pallet as ElectionProviderBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use frame_system_benchmarking::Pallet as SystemBench; use pallet_nomination_pools_benchmarking::Pallet as NominationPoolsBench; -- GitLab From 596088a273eff13fbefe6fa20a5fef6507b329cb Mon Sep 17 00:00:00 2001 From: Bruno Galvao Date: Fri, 17 Nov 2023 01:31:31 -0500 Subject: [PATCH 51/74] add pallet nomination-pools versioned migration to kitchensink (#2167) The versioned migrations are already there in pallet nomination-pools: https://github.com/paritytech/polkadot-sdk/blob/f6ee4781f633f0f89598f7b230595afe401da8dc/substrate/frame/nomination-pools/src/migration.rs#L27-L48 Just updating the kitchensink runtime to point to them. This is also nice because it points the dev to an example of how to use `VersionedMigration`. --- polkadot/runtime/westend/src/lib.rs | 4 ++-- substrate/bin/node/runtime/src/lib.rs | 5 +++-- substrate/frame/nomination-pools/src/migration.rs | 2 +- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 16a0b112a0b..29183fdfe00 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1533,9 +1533,9 @@ pub mod migrations { parachains_configuration::migration::v8::MigrateToV8, parachains_configuration::migration::v9::MigrateToV9, paras_registrar::migration::MigrateToV1, - pallet_nomination_pools::migration::versioned_migrations::V5toV6, + pallet_nomination_pools::migration::versioned::V5toV6, pallet_referenda::migration::v1::MigrateV0ToV1, - pallet_nomination_pools::migration::versioned_migrations::V6ToV7, + pallet_nomination_pools::migration::versioned::V6ToV7, pallet_grandpa::migrations::MigrateV4ToV5, parachains_configuration::migration::v10::MigrateToV10, ); diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 90946b71311..d7beb29becf 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -2194,9 +2194,10 @@ pub type Executive = frame_executive::Executive< >; // All migrations executed on runtime upgrade as a nested tuple of types implementing -// `OnRuntimeUpgrade`. +// `OnRuntimeUpgrade`. Note: These are examples and do not need to be run directly +// after the genesis block. type Migrations = ( - pallet_nomination_pools::migration::v2::MigrateToV2, + pallet_nomination_pools::migration::versioned::V6ToV7, pallet_alliance::migration::Migration, pallet_contracts::Migration, ); diff --git a/substrate/frame/nomination-pools/src/migration.rs b/substrate/frame/nomination-pools/src/migration.rs index eef2a976f1a..3d68fee1dca 100644 --- a/substrate/frame/nomination-pools/src/migration.rs +++ b/substrate/frame/nomination-pools/src/migration.rs @@ -24,7 +24,7 @@ use sp_std::{collections::btree_map::BTreeMap, vec::Vec}; use sp_runtime::TryRuntimeError; /// Exports for versioned migration `type`s for this pallet. 
-pub mod versioned_migrations { +pub mod versioned { use super::*; /// Migration V6 to V7 wrapped in a [`frame_support::migrations::VersionedMigration`], ensuring -- GitLab From b85c64aa5fa945027c7a2bfe1788e09309febc7a Mon Sep 17 00:00:00 2001 From: cuteolaf Date: Thu, 16 Nov 2023 23:20:09 -0800 Subject: [PATCH 52/74] fix typo (#2377) --- substrate/frame/proxy/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substrate/frame/proxy/README.md b/substrate/frame/proxy/README.md index bfe26d9aefb..c52a881c590 100644 --- a/substrate/frame/proxy/README.md +++ b/substrate/frame/proxy/README.md @@ -2,7 +2,7 @@ A module allowing accounts to give permission to other accounts to dispatch types of calls from their signed origin. -The accounts to which permission is delegated may be requied to announce the action that they +The accounts to which permission is delegated may be required to announce the action that they wish to execute some duration prior to execution happens. In this case, the target account may reject the announcement and in doing so, veto the execution. -- GitLab From 20723ea80e0f442487764a9bcde1d4c989958e56 Mon Sep 17 00:00:00 2001 From: Javier Viola Date: Fri, 17 Nov 2023 08:21:42 -0300 Subject: [PATCH 53/74] bump zombienet version `v1.3.80` (#2376) New release includes logic to move all jobs to `spot instances`. Thx! --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 1dc483004f2..b485ca3317e 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -30,7 +30,7 @@ variables: RUSTY_CACHIER_COMPRESSION_METHOD: zstd NEXTEST_FAILURE_OUTPUT: immediate-final NEXTEST_SUCCESS_OUTPUT: final - ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.79" + ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.80" DOCKER_IMAGES_VERSION: "${CI_COMMIT_SHA}" default: -- GitLab From 96176ff1ef55a03344d12b0b78518b9a10f022bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Anton=20Vilhelm=20=C3=81sgeirsson?= Date: Fri, 17 Nov 2023 11:41:30 +0000 Subject: [PATCH 54/74] Add broker system parachain config to rococo (#2374) Adds system parachain configuration and gates the `assign_core` call to only the Broker system parachain. --- .../parachains/src/assigner_bulk/mod.rs | 32 +++++++++++++++++-- polkadot/runtime/rococo/constants/src/lib.rs | 4 ++- polkadot/runtime/rococo/src/lib.rs | 3 ++ polkadot/runtime/rococo/src/xcm_config.rs | 5 ++- 4 files changed, 40 insertions(+), 4 deletions(-) diff --git a/polkadot/runtime/parachains/src/assigner_bulk/mod.rs b/polkadot/runtime/parachains/src/assigner_bulk/mod.rs index 8f021791ba0..3f674a857c7 100644 --- a/polkadot/runtime/parachains/src/assigner_bulk/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_bulk/mod.rs @@ -180,6 +180,10 @@ pub mod pallet { { /// Something that provides the weight of this pallet. type WeightInfo: WeightInfo; + + /// Origin from which coretime extrinsics may be called. This is generally the Broker + /// system parachain. + type ExternalBrokerOrigin: EnsureOrigin; } /// Scheduled assignment sets. @@ -217,8 +221,32 @@ pub mod pallet { } } + /// Receive instructions from the `ExternalBrokerOrigin`, detailing how a specific core is to be + /// used. + /// + /// Parameters: + /// -`origin`: The `ExternalBrokerOrigin`, assumed to be the Broker system parachain. + /// -`core`: The core that should be scheduled. + /// -`begin`: The starting blockheight of the instruction. + /// -`assignment`: How the blockspace should be utilised. 
+ /// -`end_hint`: An optional hint as to when this particular set of instructions will end. #[pallet::call] - impl Pallet {} + impl Pallet { + //TODO: Weights + #[pallet::call_index(0)] + pub fn assign_core( + origin: OriginFor, + core: CoreIndex, + begin: BlockNumberFor, + assignment: Vec<(CoreAssignment, PartsOf57600)>, + end_hint: Option>, + ) -> DispatchResult { + // Ignore requests not coming from the External Broker parachain. + let _multi_location = ::ExternalBrokerOrigin::ensure_origin(origin)?; + + Pallet::::do_assign_core(core, begin, assignment, end_hint) + } + } #[pallet::error] pub enum Error { @@ -412,7 +440,7 @@ impl Pallet { /// The problem is that insertion complexity then depends on the size of the existing queue, /// which makes determining weights hard and could lead to issues like overweight blocks (at /// least in theory). - pub fn assign_core( + pub fn do_assign_core( core_idx: CoreIndex, begin: BlockNumberFor, assignments: Vec<(CoreAssignment, PartsOf57600)>, diff --git a/polkadot/runtime/rococo/constants/src/lib.rs b/polkadot/runtime/rococo/constants/src/lib.rs index 2f641d60fc8..6862e3f70b9 100644 --- a/polkadot/runtime/rococo/constants/src/lib.rs +++ b/polkadot/runtime/rococo/constants/src/lib.rs @@ -113,10 +113,12 @@ pub mod system_parachain { pub const ENCOINTER_ID: u32 = 1003; /// BridgeHub parachain ID. pub const BRIDGE_HUB_ID: u32 = 1013; + /// Brokerage parachain ID. + pub const BROKER_ID: u32 = 1004; frame_support::match_types! { pub type SystemParachains: impl Contains = { - MultiLocation { parents: 0, interior: X1(Parachain(ASSET_HUB_ID | CONTRACTS_ID | ENCOINTER_ID | BRIDGE_HUB_ID)) } + MultiLocation { parents: 0, interior: X1(Parachain(ASSET_HUB_ID | CONTRACTS_ID | ENCOINTER_ID | BRIDGE_HUB_ID | BROKER_ID)) } }; } } diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 62bd05cd85b..ffd70795a20 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -21,6 +21,7 @@ #![recursion_limit = "512"] use pallet_nis::WithMaximumOf; +use pallet_xcm::EnsureXcm; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use primitives::{ slashing, AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CandidateHash, @@ -120,6 +121,7 @@ use governance::{ pallet_custom_origins, AuctionAdmin, Fellows, GeneralAdmin, LeaseAdmin, Treasurer, TreasurySpender, }; +use xcm_config::Broker; #[cfg(test)] mod tests; @@ -979,6 +981,7 @@ impl parachains_assigner_bulk::Config for Runtime { // FIXME: Proper weights: type WeightInfo = (); // type WeightInfo = weights::runtime_parachains_assigner_bulk::WeightInfo; + type ExternalBrokerOrigin = EnsureXcm; } impl parachains_assigner_parachains::Config for Runtime {} diff --git a/polkadot/runtime/rococo/src/xcm_config.rs b/polkadot/runtime/rococo/src/xcm_config.rs index 0814b77414f..eab3a3a42f2 100644 --- a/polkadot/runtime/rococo/src/xcm_config.rs +++ b/polkadot/runtime/rococo/src/xcm_config.rs @@ -25,7 +25,7 @@ use crate::governance::StakingAdmin; use frame_support::{ match_types, parameter_types, - traits::{Everything, Nothing}, + traits::{Contains, Everything, Nothing}, weights::Weight, }; use frame_system::EnsureRoot; @@ -118,6 +118,7 @@ parameter_types! 
{ pub const Contracts: MultiLocation = Parachain(CONTRACTS_ID).into_location(); pub const Encointer: MultiLocation = Parachain(ENCOINTER_ID).into_location(); pub const BridgeHub: MultiLocation = Parachain(BRIDGE_HUB_ID).into_location(); + pub const Broker: MultiLocation = Parachain(BROKER_ID).into_location(); pub const Tick: MultiLocation = Parachain(100).into_location(); pub const Trick: MultiLocation = Parachain(110).into_location(); pub const Track: MultiLocation = Parachain(120).into_location(); @@ -128,6 +129,7 @@ parameter_types! { pub const RocForContracts: (MultiAssetFilter, MultiLocation) = (Roc::get(), Contracts::get()); pub const RocForEncointer: (MultiAssetFilter, MultiLocation) = (Roc::get(), Encointer::get()); pub const RocForBridgeHub: (MultiAssetFilter, MultiLocation) = (Roc::get(), BridgeHub::get()); + pub const RocForBroker: (MultiAssetFilter, MultiLocation) = (Roc::get(), Broker::get()); pub const MaxInstructions: u32 = 100; pub const MaxAssetsIntoHolding: u32 = 64; } @@ -139,6 +141,7 @@ pub type TrustedTeleporters = ( xcm_builder::Case, xcm_builder::Case, xcm_builder::Case, + xcm_builder::Case, ); match_types! { -- GitLab From 2e001de9346257321a8e2447d22e86d7c71a917a Mon Sep 17 00:00:00 2001 From: Ankan <10196091+Ank4n@users.noreply.github.com> Date: Fri, 17 Nov 2023 13:48:31 +0100 Subject: [PATCH 55/74] [NPoS] Check if staker is exposed in paged exposure storage entries (#2369) Addresses a bug caused by https://github.com/paritytech/polkadot-sdk/pull/1189. The changes are still not released yet, so would like to push the fix soon so it can go together with the release of the above PR. `fast_unstake` checks if a staker is exposed in an era. However, this fn is still returning whether the staker is exposed based on the old storage item. This PR fixes that by looking in both old and new exposure storages. Also adds some integrity tests for paged exposures. 
--- substrate/frame/staking/src/pallet/impls.rs | 74 ++++++++++++++++++++- substrate/frame/staking/src/tests.rs | 16 +++++ 2 files changed, 89 insertions(+), 1 deletion(-) diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs index 9c36c94b87b..40f30735258 100644 --- a/substrate/frame/staking/src/pallet/impls.rs +++ b/substrate/frame/staking/src/pallet/impls.rs @@ -794,7 +794,7 @@ impl Pallet { stash: T::AccountId, exposure: Exposure>, ) { - >::insert(¤t_era, &stash, &exposure); + EraInfo::::set_exposure(current_era, &stash, exposure); } #[cfg(feature = "runtime-benchmarks")] @@ -1745,9 +1745,16 @@ impl StakingInterface for Pallet { } fn is_exposed_in_era(who: &Self::AccountId, era: &EraIndex) -> bool { + // look in the non paged exposures + // FIXME: Can be cleaned up once non paged exposures are cleared (https://github.com/paritytech/polkadot-sdk/issues/433) ErasStakers::::iter_prefix(era).any(|(validator, exposures)| { validator == *who || exposures.others.iter().any(|i| i.who == *who) }) + || + // look in the paged exposures + ErasStakersPaged::::iter_prefix((era,)).any(|((validator, _), exposure_page)| { + validator == *who || exposure_page.others.iter().any(|i| i.who == *who) + }) } fn status( who: &Self::AccountId, @@ -1812,6 +1819,7 @@ impl Pallet { Self::check_nominators()?; Self::check_exposures()?; + Self::check_paged_exposures()?; Self::check_ledgers()?; Self::check_count() } @@ -1860,6 +1868,70 @@ impl Pallet { .collect::>() } + fn check_paged_exposures() -> Result<(), TryRuntimeError> { + use sp_staking::PagedExposureMetadata; + use sp_std::collections::btree_map::BTreeMap; + + // Sanity check for the paged exposure of the active era. + let mut exposures: BTreeMap>> = + BTreeMap::new(); + let era = Self::active_era().unwrap().index; + let accumulator_default = PagedExposureMetadata { + total: Zero::zero(), + own: Zero::zero(), + nominator_count: 0, + page_count: 0, + }; + + ErasStakersPaged::::iter_prefix((era,)) + .map(|((validator, _page), expo)| { + ensure!( + expo.page_total == + expo.others.iter().map(|e| e.value).fold(Zero::zero(), |acc, x| acc + x), + "wrong total exposure for the page.", + ); + + let metadata = exposures.get(&validator).unwrap_or(&accumulator_default); + exposures.insert( + validator, + PagedExposureMetadata { + total: metadata.total + expo.page_total, + own: metadata.own, + nominator_count: metadata.nominator_count + expo.others.len() as u32, + page_count: metadata.page_count + 1, + }, + ); + + Ok(()) + }) + .collect::>()?; + + exposures + .iter() + .map(|(validator, metadata)| { + let actual_overview = ErasStakersOverview::::get(era, validator); + + ensure!(actual_overview.is_some(), "No overview found for a paged exposure"); + let actual_overview = actual_overview.unwrap(); + + ensure!( + actual_overview.total == metadata.total + actual_overview.own, + "Exposure metadata does not have correct total exposed stake." + ); + ensure!( + actual_overview.nominator_count == metadata.nominator_count, + "Exposure metadata does not have correct count of nominators." + ); + ensure!( + actual_overview.page_count == metadata.page_count, + "Exposure metadata does not have correct count of pages." + ); + + Ok(()) + }) + .collect::>() + } + fn check_nominators() -> Result<(), TryRuntimeError> { // a check per nominator to ensure their entire stake is correctly distributed. Will only // kick-in if the nomination was submitted before the current era. 
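For context, a rough sketch of how a consumer such as `pallet-fast-unstake` relies on this check is below. This is illustrative only — the helper name and surrounding types are hypothetical, and the real pallet reaches this call through its `Config::Staking` associated type:

```rust
// Illustrative sketch (hypothetical helper): a staker may only be fast-unstaked if
// they were not exposed in any of the eras under consideration; the closure stands
// in for a call to `StakingInterface::is_exposed_in_era`. Before this PR that check
// missed stakers whose exposure only lives in the new paged storage
// (`ErasStakersPaged`), because only the old `ErasStakers` entries were inspected.
fn can_fast_unstake<AccountId, EraIndex>(
    is_exposed_in_era: impl Fn(&AccountId, &EraIndex) -> bool,
    who: &AccountId,
    eras_to_check: &[EraIndex],
) -> bool {
    // Require "not exposed" in every era that still has to be checked.
    eras_to_check.iter().all(|era| !is_exposed_in_era(who, era))
}
```
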
diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs index ee6f67adf14..bac2530b19b 100644 --- a/substrate/frame/staking/src/tests.rs +++ b/substrate/frame/staking/src/tests.rs @@ -6637,6 +6637,14 @@ fn test_validator_exposure_is_backward_compatible_with_non_paged_rewards_payout( ); assert_eq!(EraInfo::::get_page_count(1, &11), 2); + // validator is exposed + assert!(::is_exposed_in_era(&11, &1)); + // nominators are exposed + for i in 10..15 { + let who: AccountId = 1000 + i; + assert!(::is_exposed_in_era(&who, &1)); + } + // case 2: exposure exist in ErasStakers and ErasStakersClipped (legacy). // delete paged storage and add exposure to clipped storage >::remove((1, 11, 0)); @@ -6672,6 +6680,14 @@ fn test_validator_exposure_is_backward_compatible_with_non_paged_rewards_payout( assert_eq!(actual_exposure_full.own, 1000); assert_eq!(actual_exposure_full.total, total_exposure); + // validator is exposed + assert!(::is_exposed_in_era(&11, &1)); + // nominators are exposed + for i in 10..15 { + let who: AccountId = 1000 + i; + assert!(::is_exposed_in_era(&who, &1)); + } + // for pages other than 0, clipped storage returns empty exposure assert_eq!(EraInfo::::get_paged_exposure(1, &11, 1), None); // page size is 1 for clipped storage -- GitLab From 3ab2bc9ff3c6ecf14694f01e3c7e8f394219585a Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Fri, 17 Nov 2023 15:24:20 +0200 Subject: [PATCH 56/74] Beefy: small fixes (#2378) Related to #2285 - save the state of the BEEFY gadget after processing a finality proof. We need this in order to avoid skipping blocks. - avoid reprocessing the old state when not necessary --- .../client/consensus/beefy/src/worker.rs | 36 ++++++++++++------- 1 file changed, 23 insertions(+), 13 deletions(-) diff --git a/substrate/client/consensus/beefy/src/worker.rs b/substrate/client/consensus/beefy/src/worker.rs index 309d8c5135b..966c7410365 100644 --- a/substrate/client/consensus/beefy/src/worker.rs +++ b/substrate/client/consensus/beefy/src/worker.rs @@ -456,6 +456,7 @@ where .filter(|genesis| *genesis == self.persisted_state.pallet_genesis) .ok_or(Error::ConsensusReset)?; + let mut new_session_added = false; if *header.number() > self.best_grandpa_block() { // update best GRANDPA finalized block we have seen self.persisted_state.set_best_grandpa(header.clone()); @@ -475,9 +476,15 @@ where { if let Some(new_validator_set) = find_authorities_change::(&header) { self.init_session_at(new_validator_set, *header.number()); + new_session_added = true; } } + if new_session_added { + crate::aux_schema::write_voter_state(&*self.backend, &self.persisted_state) + .map_err(|e| Error::Backend(e.to_string()))?; + } + // Update gossip validator votes filter. if let Err(e) = self .persisted_state @@ -848,15 +855,10 @@ where .fuse(), ); + self.process_new_state(); let error = loop { - // Act on changed 'state'. - self.process_new_state(); - // Mutable reference used to drive the gossip engine. let mut gossip_engine = &mut self.comms.gossip_engine; - // Use temp val and report after async section, - // to avoid having to Mutex-wrap `gossip_engine`. - let mut gossip_report: Option = None; // Wait for, and handle external events. 
// The branches below only change 'state', actual voting happens afterwards, @@ -884,10 +886,15 @@ where if let Err(err) = self.triage_incoming_justif(justif) { debug!(target: LOG_TARGET, "🥩 {}", err); } - gossip_report = Some(peer_report); + self.comms.gossip_engine.report(peer_report.who, peer_report.cost_benefit); + }, + ResponseInfo::PeerReport(peer_report) => { + self.comms.gossip_engine.report(peer_report.who, peer_report.cost_benefit); + continue; + }, + ResponseInfo::Pending => { + continue; }, - ResponseInfo::PeerReport(peer_report) => gossip_report = Some(peer_report), - ResponseInfo::Pending => (), } }, justif = block_import_justif.next() => { @@ -924,12 +931,15 @@ where }, // Process peer reports. report = self.comms.gossip_report_stream.next() => { - gossip_report = report; + if let Some(PeerReport { who, cost_benefit }) = report { + self.comms.gossip_engine.report(who, cost_benefit); + } + continue; }, } - if let Some(PeerReport { who, cost_benefit }) = gossip_report { - self.comms.gossip_engine.report(who, cost_benefit); - } + + // Act on changed 'state'. + self.process_new_state(); }; // return error _and_ `comms` that can be reused -- GitLab From 5007e2dd5cb5514fe11ad1bdb21ea9c7fb0ae5ca Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Fri, 17 Nov 2023 14:43:37 +0100 Subject: [PATCH 57/74] crypto: `lazy_static` removed, light parser for address URI added (#2250) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The `lazy_static` package does not work well in `no-std`: it requires `spin_no_std` feature, which also will propagate into `std` if enabled. This is not what we want. This PR provides simple address uri parser which allows to get rid of _regex_ which was used to parse the address uri, what in turns allows to remove lazy_static. Three regular expressions (`SS58_REGEX`,`SECRET_PHRASE_REGEX`,`JUNCTION_REGEX`) were replaced with the parser which unifies all of them. The new parser does not support Unicode, it is ASCII only. 
Related to: #2044 --------- Co-authored-by: Bastian Köcher Co-authored-by: Koute Co-authored-by: command-bot <> --- Cargo.lock | 45 +- Cargo.toml | 1 + ...age_ensure_span_are_ok_on_wrong_gen.stderr | 12 +- ...re_span_are_ok_on_wrong_gen_unnamed.stderr | 12 +- substrate/primitives/core/Cargo.toml | 6 +- substrate/primitives/core/fuzz/Cargo.toml | 20 + .../fuzz/fuzz_targets/fuzz_address_uri.rs | 53 +++ substrate/primitives/core/src/address_uri.rs | 432 ++++++++++++++++++ substrate/primitives/core/src/crypto.rs | 85 ++-- substrate/primitives/core/src/lib.rs | 2 + 10 files changed, 601 insertions(+), 67 deletions(-) create mode 100644 substrate/primitives/core/fuzz/Cargo.toml create mode 100644 substrate/primitives/core/fuzz/fuzz_targets/fuzz_address_uri.rs create mode 100644 substrate/primitives/core/src/address_uri.rs diff --git a/Cargo.lock b/Cargo.lock index 7854042ddbc..64da4b8996c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7408,6 +7408,17 @@ dependencies = [ "rle-decode-fast", ] +[[package]] +name = "libfuzzer-sys" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a96cfd5557eb82f2b83fed4955246c988d331975a002961b07c81584d107e7f7" +dependencies = [ + "arbitrary", + "cc", + "once_cell", +] + [[package]] name = "libloading" version = "0.7.4" @@ -8182,9 +8193,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.5.0" +version = "2.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" [[package]] name = "memfd" @@ -14297,14 +14308,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.3" +version = "1.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a" +checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.3.6", - "regex-syntax 0.7.4", + "regex-automata 0.4.3", + "regex-syntax 0.8.2", ] [[package]] @@ -14321,10 +14332,16 @@ name = "regex-automata" version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" + +[[package]] +name = "regex-automata" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.7.4", + "regex-syntax 0.8.2", ] [[package]] @@ -14335,9 +14352,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.4" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" +checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "remote-ext-tests-bags-list" @@ -17535,6 +17552,16 @@ dependencies = [ "zeroize", ] +[[package]] +name = "sp-core-fuzz" +version = "0.0.0" +dependencies = [ + "lazy_static", + "libfuzzer-sys", + "regex", + "sp-core", +] + [[package]] name = "sp-core-hashing" version = "9.0.0" diff --git a/Cargo.toml b/Cargo.toml index ed252e07053..57079aa4d03 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -405,6 +405,7 @@ members = [ "substrate/primitives/consensus/sassafras", 
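For illustration, the kind of splitting involved looks roughly like the sketch below. This is *not* the parser added in this PR (see `address_uri.rs` in the diff); the names are hypothetical, and the distinction between hard (`//`) and soft (`/`) junctions as well as character-set validation are deliberately elided:

```rust
// Illustrative sketch only: split a `phrase//hard/soft///password` style URI
// without regular expressions. The real parser keeps the `//` vs `/` junction
// distinction and reports precise error positions; this sketch does not.
struct ParsedUri<'a> {
    phrase: Option<&'a str>,
    junctions: Vec<&'a str>,
    pass: Option<&'a str>,
}

fn split_uri(input: &str) -> ParsedUri<'_> {
    // Everything after the first `///` is treated as the password here.
    let (body, pass) = match input.find("///") {
        Some(i) => (&input[..i], Some(&input[i + 3..])),
        None => (input, None),
    };
    // The phrase is everything before the first `/`; junctions follow.
    let (phrase, rest) = match body.find('/') {
        Some(i) => (&body[..i], &body[i..]),
        None => (body, ""),
    };
    ParsedUri {
        phrase: (!phrase.is_empty()).then_some(phrase),
        junctions: rest.split('/').filter(|s| !s.is_empty()).collect(),
        pass,
    }
}
```
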
"substrate/primitives/consensus/slots", "substrate/primitives/core", + "substrate/primitives/core/fuzz", "substrate/primitives/core/hashing", "substrate/primitives/core/hashing/proc-macro", "substrate/primitives/crypto/ec-utils", diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr index 7375bcd2f16..b5d10827524 100644 --- a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr @@ -6,8 +6,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied | = help: the following other types implement trait `WrapperTypeDecode`: Box - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` @@ -44,8 +44,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied bytes::bytes::Bytes Cow<'a, T> parity_scale_codec::Ref<'a, T, U> - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc Vec and $N others @@ -81,8 +81,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied | = help: the following other types implement trait `WrapperTypeDecode`: Box - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` @@ -119,8 +119,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied bytes::bytes::Bytes Cow<'a, T> parity_scale_codec::Ref<'a, T, U> - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc Vec and $N others @@ -137,8 +137,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied | = help: the following other types implement trait `WrapperTypeDecode`: Box - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` @@ -177,8 +177,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied bytes::bytes::Bytes Cow<'a, T> parity_scale_codec::Ref<'a, T, U> - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc Vec and $N others diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr index 3a0a25712aa..b58902590b8 100644 --- a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr @@ -6,8 +6,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied | = help: the following other types implement trait `WrapperTypeDecode`: Box - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + 
frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` @@ -44,8 +44,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied bytes::bytes::Bytes Cow<'a, T> parity_scale_codec::Ref<'a, T, U> - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc Vec and $N others @@ -81,8 +81,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied | = help: the following other types implement trait `WrapperTypeDecode`: Box - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` @@ -119,8 +119,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied bytes::bytes::Bytes Cow<'a, T> parity_scale_codec::Ref<'a, T, U> - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc Vec and $N others @@ -137,8 +137,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied | = help: the following other types implement trait `WrapperTypeDecode`: Box - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` @@ -177,8 +177,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied bytes::bytes::Bytes Cow<'a, T> parity_scale_codec::Ref<'a, T, U> - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc Vec and $N others diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml index 79df81e62c6..9ecce0a22f5 100644 --- a/substrate/primitives/core/Cargo.toml +++ b/substrate/primitives/core/Cargo.toml @@ -26,10 +26,8 @@ bs58 = { version = "0.5.0", default-features = false, optional = true } rand = { version = "0.8.5", features = ["small_rng"], optional = true } substrate-bip39 = { version = "0.4.4", optional = true } bip39 = { version = "2.0.0", default-features = false } -regex = { version = "1.6.0", optional = true } zeroize = { version = "1.4.3", default-features = false } secrecy = { version = "0.8.0", default-features = false } -lazy_static = { version = "1.4.0", default-features = false, optional = true } parking_lot = { version = "0.12.1", optional = true } ss58-registry = { version = "1.34.0", default-features = false } sp-std = { path = "../std", default-features = false} @@ -63,6 +61,8 @@ bandersnatch_vrfs = { git = "https://github.com/w3f/ring-vrf", rev = "cbc342e", [dev-dependencies] criterion = "0.4.0" serde_json = "1.0.108" +lazy_static = "1.4.0" +regex = "1.6.0" sp-core-hashing-proc-macro = { path = "hashing/proc-macro" } [[bench]] @@ -92,7 +92,6 @@ std = [ "hash256-std-hasher/std", "impl-serde/std", "itertools", - "lazy_static", "libsecp256k1/std", "log/std", "merlin/std", @@ -102,7 +101,6 @@ std = [ "primitive-types/serde", "primitive-types/std", "rand", - "regex", "scale-info/std", "schnorrkel/std", "secp256k1/global-context", diff --git a/substrate/primitives/core/fuzz/Cargo.toml b/substrate/primitives/core/fuzz/Cargo.toml new file mode 100644 index 
00000000000..9a094b07d4a --- /dev/null +++ b/substrate/primitives/core/fuzz/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "sp-core-fuzz" +version = "0.0.0" +publish = false + +[package.metadata] +cargo-fuzz = true + +[dependencies] +lazy_static = "1.4.0" +libfuzzer-sys = "0.4" +regex = "1.10.2" + +sp-core = { path = ".." } + +[[bin]] +name = "fuzz_address_uri" +path = "fuzz_targets/fuzz_address_uri.rs" +test = false +doc = false diff --git a/substrate/primitives/core/fuzz/fuzz_targets/fuzz_address_uri.rs b/substrate/primitives/core/fuzz/fuzz_targets/fuzz_address_uri.rs new file mode 100644 index 00000000000..e2d9e2fc8b0 --- /dev/null +++ b/substrate/primitives/core/fuzz/fuzz_targets/fuzz_address_uri.rs @@ -0,0 +1,53 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![no_main] + +extern crate libfuzzer_sys; +extern crate regex; +extern crate sp_core; + +use libfuzzer_sys::fuzz_target; +use regex::Regex; +use sp_core::crypto::AddressUri; + +lazy_static::lazy_static! { + static ref SECRET_PHRASE_REGEX: Regex = Regex::new(r"^(?P[a-zA-Z0-9 ]+)?(?P(//?[^/]+)*)(///(?P.*))?$") + .expect("constructed from known-good static value; qed"); +} + +fuzz_target!(|input: &str| { + let regex_result = SECRET_PHRASE_REGEX.captures(input); + let manual_result = AddressUri::parse(input); + assert_eq!(regex_result.is_some(), manual_result.is_ok()); + if manual_result.is_err() { + let _ = format!("{}", manual_result.as_ref().err().unwrap()); + } + if let (Some(regex_result), Ok(manual_result)) = (regex_result, manual_result) { + assert_eq!(regex_result.name("phrase").map(|p| p.as_str()), manual_result.phrase); + + let manual_paths = manual_result + .paths + .iter() + .map(|s| "/".to_string() + s) + .collect::>() + .join(""); + + assert_eq!(regex_result.name("path").unwrap().as_str().to_string(), manual_paths); + assert_eq!(regex_result.name("password").map(|pass| pass.as_str()), manual_result.pass); + } +}); diff --git a/substrate/primitives/core/src/address_uri.rs b/substrate/primitives/core/src/address_uri.rs new file mode 100644 index 00000000000..862747c9a4b --- /dev/null +++ b/substrate/primitives/core/src/address_uri.rs @@ -0,0 +1,432 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Little util for parsing an address URI. 
Replaces regular expressions. + +#[cfg(all(not(feature = "std"), any(feature = "serde", feature = "full_crypto")))] +use sp_std::{ + alloc::string::{String, ToString}, + vec::Vec, +}; + +/// A container for results of parsing the address uri string. +/// +/// Intended to be equivalent of: +/// `Regex::new(r"^(?P[a-zA-Z0-9 ]+)?(?P(//?[^/]+)*)(///(?P.*))?$")` +/// which also handles soft and hard derivation paths: +/// `Regex::new(r"/(/?[^/]+)")` +/// +/// Example: +/// ``` +/// use sp_core::crypto::AddressUri; +/// let manual_result = AddressUri::parse("hello world/s//h///pass"); +/// assert_eq!( +/// manual_result.unwrap(), +/// AddressUri { phrase: Some("hello world"), paths: vec!["s", "/h"], pass: Some("pass") } +/// ); +/// ``` +#[derive(Debug, PartialEq)] +pub struct AddressUri<'a> { + /// Phrase, hexadecimal string, or ss58-compatible string. + pub phrase: Option<&'a str>, + /// Key derivation paths, ordered as in input string, + pub paths: Vec<&'a str>, + /// Password. + pub pass: Option<&'a str>, +} + +/// Errors that are possible during parsing the address URI. +#[allow(missing_docs)] +#[cfg_attr(feature = "std", derive(thiserror::Error))] +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum Error { + #[cfg_attr(feature = "std", error("Invalid character in phrase:\n{0}"))] + InvalidCharacterInPhrase(InvalidCharacterInfo), + #[cfg_attr(feature = "std", error("Invalid character in password:\n{0}"))] + InvalidCharacterInPass(InvalidCharacterInfo), + #[cfg_attr(feature = "std", error("Missing character in hard path:\n{0}"))] + MissingCharacterInHardPath(InvalidCharacterInfo), + #[cfg_attr(feature = "std", error("Missing character in soft path:\n{0}"))] + MissingCharacterInSoftPath(InvalidCharacterInfo), +} + +impl Error { + /// Creates an instance of `Error::InvalidCharacterInPhrase` using given parameters. + pub fn in_phrase(input: &str, pos: usize) -> Self { + Self::InvalidCharacterInPhrase(InvalidCharacterInfo::new(input, pos)) + } + /// Creates an instance of `Error::InvalidCharacterInPass` using given parameters. + pub fn in_pass(input: &str, pos: usize) -> Self { + Self::InvalidCharacterInPass(InvalidCharacterInfo::new(input, pos)) + } + /// Creates an instance of `Error::MissingCharacterInHardPath` using given parameters. + pub fn in_hard_path(input: &str, pos: usize) -> Self { + Self::MissingCharacterInHardPath(InvalidCharacterInfo::new(input, pos)) + } + /// Creates an instance of `Error::MissingCharacterInSoftPath` using given parameters. + pub fn in_soft_path(input: &str, pos: usize) -> Self { + Self::MissingCharacterInSoftPath(InvalidCharacterInfo::new(input, pos)) + } +} + +/// Complementary error information. +/// +/// Strucutre contains complementary information about parsing address URI string. +/// String contains a copy of an original URI string, 0-based integer indicates position of invalid +/// character. +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct InvalidCharacterInfo(String, usize); + +impl InvalidCharacterInfo { + fn new(info: &str, pos: usize) -> Self { + Self(info.to_string(), pos) + } +} + +impl sp_std::fmt::Display for InvalidCharacterInfo { + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + let (s, pos) = escape_string(&self.0, self.1); + write!(f, "{s}\n{i}^", i = sp_std::iter::repeat(" ").take(pos).collect::()) + } +} + +/// Escapes the control characters in given string, and recomputes the position if some characters +/// were actually escaped. 
+fn escape_string(input: &str, pos: usize) -> (String, usize) { + let mut out = String::with_capacity(2 * input.len()); + let mut out_pos = 0; + input + .chars() + .enumerate() + .map(|(i, c)| { + let esc = |c| (i, Some('\\'), c, 2); + match c { + '\t' => esc('t'), + '\n' => esc('n'), + '\r' => esc('r'), + '\x07' => esc('a'), + '\x08' => esc('b'), + '\x0b' => esc('v'), + '\x0c' => esc('f'), + _ => (i, None, c, 1), + } + }) + .for_each(|(i, maybe_escape, c, increment)| { + maybe_escape.map(|e| out.push(e)); + out.push(c); + if i < pos { + out_pos += increment; + } + }); + (out, out_pos) +} + +fn extract_prefix<'a>(input: &mut &'a str, is_allowed: &dyn Fn(char) -> bool) -> Option<&'a str> { + let output = input.trim_start_matches(is_allowed); + let prefix_len = input.len() - output.len(); + let prefix = if prefix_len > 0 { Some(&input[..prefix_len]) } else { None }; + *input = output; + prefix +} + +fn strip_prefix(input: &mut &str, prefix: &str) -> bool { + if let Some(stripped_input) = input.strip_prefix(prefix) { + *input = stripped_input; + true + } else { + false + } +} + +impl<'a> AddressUri<'a> { + /// Parses the given string. + pub fn parse(mut input: &'a str) -> Result { + let initial_input = input; + let initial_input_len = input.len(); + let phrase = extract_prefix(&mut input, &|ch: char| { + ch.is_ascii_digit() || ch.is_ascii_alphabetic() || ch == ' ' + }); + + let mut pass = None; + let mut paths = Vec::new(); + while !input.is_empty() { + let unstripped_input = input; + if strip_prefix(&mut input, "///") { + pass = Some(extract_prefix(&mut input, &|ch: char| ch != '\n').unwrap_or("")); + } else if strip_prefix(&mut input, "//") { + let path = extract_prefix(&mut input, &|ch: char| ch != '/') + .ok_or(Error::in_hard_path(initial_input, initial_input_len - input.len()))?; + assert!(path.len() > 0); + // hard path shall contain leading '/', so take it from unstripped input. + paths.push(&unstripped_input[1..path.len() + 2]); + } else if strip_prefix(&mut input, "/") { + paths.push( + extract_prefix(&mut input, &|ch: char| ch != '/').ok_or( + Error::in_soft_path(initial_input, initial_input_len - input.len()), + )?, + ); + } else { + return Err(if pass.is_some() { + Error::in_pass(initial_input, initial_input_len - input.len()) + } else { + Error::in_phrase(initial_input, initial_input_len - input.len()) + }); + } + } + + Ok(Self { phrase, paths, pass }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use regex::Regex; + + lazy_static::lazy_static! 
{ + static ref SECRET_PHRASE_REGEX: Regex = Regex::new(r"^(?P[a-zA-Z0-9 ]+)?(?P(//?[^/]+)*)(///(?P.*))?$") + .expect("constructed from known-good static value; qed"); + } + + fn check_with_regex(input: &str) { + let regex_result = SECRET_PHRASE_REGEX.captures(input); + let manual_result = AddressUri::parse(input); + assert_eq!(regex_result.is_some(), manual_result.is_ok()); + if let (Some(regex_result), Ok(manual_result)) = (regex_result, manual_result) { + assert_eq!( + regex_result.name("phrase").map(|phrase| phrase.as_str()), + manual_result.phrase + ); + + let manual_paths = manual_result + .paths + .iter() + .map(|s| "/".to_string() + s) + .collect::>() + .join(""); + + assert_eq!(regex_result.name("path").unwrap().as_str().to_string(), manual_paths); + assert_eq!( + regex_result.name("password").map(|phrase| phrase.as_str()), + manual_result.pass + ); + } + } + + fn check(input: &str, result: Result) { + let manual_result = AddressUri::parse(input); + assert_eq!(manual_result, result); + check_with_regex(input); + } + + #[test] + fn test00() { + check("///", Ok(AddressUri { phrase: None, pass: Some(""), paths: vec![] })); + } + + #[test] + fn test01() { + check("////////", Ok(AddressUri { phrase: None, pass: Some("/////"), paths: vec![] })) + } + + #[test] + fn test02() { + check( + "sdasd///asda", + Ok(AddressUri { phrase: Some("sdasd"), pass: Some("asda"), paths: vec![] }), + ); + } + + #[test] + fn test03() { + check( + "sdasd//asda", + Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["/asda"] }), + ); + } + + #[test] + fn test04() { + check("sdasd//a", Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["/a"] })); + } + + #[test] + fn test05() { + let input = "sdasd//"; + check(input, Err(Error::in_hard_path(input, 7))); + } + + #[test] + fn test06() { + check( + "sdasd/xx//asda", + Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["xx", "/asda"] }), + ); + } + + #[test] + fn test07() { + check( + "sdasd/xx//a/b//c///pass", + Ok(AddressUri { + phrase: Some("sdasd"), + pass: Some("pass"), + paths: vec!["xx", "/a", "b", "/c"], + }), + ); + } + + #[test] + fn test08() { + check( + "sdasd/xx//a", + Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["xx", "/a"] }), + ); + } + + #[test] + fn test09() { + let input = "sdasd/xx//"; + check(input, Err(Error::in_hard_path(input, 10))); + } + + #[test] + fn test10() { + check( + "sdasd/asda", + Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["asda"] }), + ); + } + + #[test] + fn test11() { + check( + "sdasd/asda//x", + Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["asda", "/x"] }), + ); + } + + #[test] + fn test12() { + check("sdasd/a", Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["a"] })); + } + + #[test] + fn test13() { + let input = "sdasd/"; + check(input, Err(Error::in_soft_path(input, 6))); + } + + #[test] + fn test14() { + check("sdasd", Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec![] })); + } + + #[test] + fn test15() { + let input = "sdasd."; + check(input, Err(Error::in_phrase(input, 5))); + } + + #[test] + fn test16() { + let input = "sd.asd/asd.a"; + check(input, Err(Error::in_phrase(input, 2))); + } + + #[test] + fn test17() { + let input = "sd.asd//asd.a"; + check(input, Err(Error::in_phrase(input, 2))); + } + + #[test] + fn test18() { + check( + "sdasd/asd.a", + Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["asd.a"] }), + ); + } + + #[test] + fn test19() { + check( + "sdasd//asd.a", + 
Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["/asd.a"] }), + ); + } + + #[test] + fn test20() { + let input = "///\n"; + check(input, Err(Error::in_pass(input, 3))); + } + + #[test] + fn test21() { + let input = "///a\n"; + check(input, Err(Error::in_pass(input, 4))); + } + + #[test] + fn test22() { + let input = "sd asd///asd.a\n"; + check(input, Err(Error::in_pass(input, 14))); + } + + #[test] + fn test_invalid_char_info_1() { + let expected = "01234\n^"; + let f = format!("{}", InvalidCharacterInfo::new("01234", 0)); + assert_eq!(expected, f); + } + + #[test] + fn test_invalid_char_info_2() { + let expected = "01\n ^"; + let f = format!("{}", InvalidCharacterInfo::new("01", 1)); + assert_eq!(expected, f); + } + + #[test] + fn test_invalid_char_info_3() { + let expected = "01234\n ^"; + let f = format!("{}", InvalidCharacterInfo::new("01234", 2)); + assert_eq!(expected, f); + } + + #[test] + fn test_invalid_char_info_4() { + let expected = "012\\n456\n ^"; + let f = format!("{}", InvalidCharacterInfo::new("012\n456", 3)); + assert_eq!(expected, f); + } + + #[test] + fn test_invalid_char_info_5() { + let expected = "012\\n456\n ^"; + let f = format!("{}", InvalidCharacterInfo::new("012\n456", 5)); + assert_eq!(expected, f); + } + + #[test] + fn test_invalid_char_info_6() { + let expected = "012\\f456\\t89\n ^"; + let f = format!("{}", InvalidCharacterInfo::new("012\x0c456\t89", 9)); + assert_eq!(expected, f); + } +} diff --git a/substrate/primitives/core/src/crypto.rs b/substrate/primitives/core/src/crypto.rs index d369de5a1c0..c9719e344d3 100644 --- a/substrate/primitives/core/src/crypto.rs +++ b/substrate/primitives/core/src/crypto.rs @@ -25,8 +25,6 @@ use codec::{Decode, Encode, MaxEncodedLen}; use itertools::Itertools; #[cfg(feature = "std")] use rand::{rngs::OsRng, RngCore}; -#[cfg(feature = "std")] -use regex::Regex; use scale_info::TypeInfo; #[cfg(feature = "std")] pub use secrecy::{ExposeSecret, SecretString}; @@ -43,6 +41,11 @@ pub use ss58_registry::{from_known_address_format, Ss58AddressFormat, Ss58Addres /// Trait to zeroize a memory buffer. pub use zeroize::Zeroize; +#[cfg(feature = "std")] +pub use crate::address_uri::AddressUri; +#[cfg(any(feature = "std", feature = "full_crypto"))] +pub use crate::address_uri::Error as AddressUriError; + /// The root phrase for our publicly known keys. pub const DEV_PHRASE: &str = "bottom drive obey lake curtain smoke basket hold race lonely fit walk"; @@ -82,8 +85,8 @@ impl> UncheckedInto for S { #[cfg(feature = "full_crypto")] pub enum SecretStringError { /// The overall format was invalid (e.g. the seed phrase contained symbols). - #[cfg_attr(feature = "std", error("Invalid format"))] - InvalidFormat, + #[cfg_attr(feature = "std", error("Invalid format {0}"))] + InvalidFormat(AddressUriError), /// The seed phrase provided is not a valid BIP39 phrase. #[cfg_attr(feature = "std", error("Invalid phrase"))] InvalidPhrase, @@ -101,6 +104,13 @@ pub enum SecretStringError { InvalidPath, } +#[cfg(any(feature = "std", feature = "full_crypto"))] +impl From for SecretStringError { + fn from(e: AddressUriError) -> Self { + Self::InvalidFormat(e) + } +} + /// An error when deriving a key. #[cfg_attr(feature = "std", derive(thiserror::Error))] #[derive(Debug, Clone, PartialEq, Eq)] @@ -208,7 +218,7 @@ impl> From for DeriveJunction { /// An error type for SS58 decoding. 
#[cfg_attr(feature = "std", derive(thiserror::Error))] #[cfg_attr(not(feature = "std"), derive(Debug))] -#[derive(Clone, Copy, Eq, PartialEq)] +#[derive(Clone, Eq, PartialEq)] #[allow(missing_docs)] #[cfg(any(feature = "full_crypto", feature = "serde"))] pub enum PublicError { @@ -235,6 +245,11 @@ pub enum PublicError { InvalidPath, #[cfg_attr(feature = "std", error("Disallowed SS58 Address Format for this datatype."))] FormatNotAllowed, + #[cfg_attr(feature = "std", error("Password not allowed."))] + PasswordNotAllowed, + #[cfg(feature = "std")] + #[cfg_attr(feature = "std", error("Incorrect URI syntax {0}."))] + MalformedUri(#[from] AddressUriError), } #[cfg(feature = "std")] @@ -414,47 +429,40 @@ pub fn set_default_ss58_version(new_default: Ss58AddressFormat) { DEFAULT_VERSION.store(new_default.into(), core::sync::atomic::Ordering::Relaxed); } -#[cfg(feature = "std")] -lazy_static::lazy_static! { - static ref SS58_REGEX: Regex = Regex::new(r"^(?P[\w\d ]+)?(?P(//?[^/]+)*)$") - .expect("constructed from known-good static value; qed"); - static ref SECRET_PHRASE_REGEX: Regex = Regex::new(r"^(?P[\d\w ]+)?(?P(//?[^/]+)*)(///(?P.*))?$") - .expect("constructed from known-good static value; qed"); - static ref JUNCTION_REGEX: Regex = Regex::new(r"/(/?[^/]+)") - .expect("constructed from known-good static value; qed"); -} - #[cfg(feature = "std")] impl + AsRef<[u8]> + Public + Derive> Ss58Codec for T { fn from_string(s: &str) -> Result { - let cap = SS58_REGEX.captures(s).ok_or(PublicError::InvalidFormat)?; - let s = cap.name("ss58").map(|r| r.as_str()).unwrap_or(DEV_ADDRESS); + let cap = AddressUri::parse(s)?; + if cap.pass.is_some() { + return Err(PublicError::PasswordNotAllowed); + } + let s = cap.phrase.unwrap_or(DEV_ADDRESS); let addr = if let Some(stripped) = s.strip_prefix("0x") { let d = array_bytes::hex2bytes(stripped).map_err(|_| PublicError::InvalidFormat)?; Self::from_slice(&d).map_err(|()| PublicError::BadLength)? } else { Self::from_ss58check(s)? 
}; - if cap["path"].is_empty() { + if cap.paths.is_empty() { Ok(addr) } else { - let path = - JUNCTION_REGEX.captures_iter(&cap["path"]).map(|f| DeriveJunction::from(&f[1])); - addr.derive(path).ok_or(PublicError::InvalidPath) + addr.derive(cap.paths.iter().map(DeriveJunction::from)) + .ok_or(PublicError::InvalidPath) } } fn from_string_with_version(s: &str) -> Result<(Self, Ss58AddressFormat), PublicError> { - let cap = SS58_REGEX.captures(s).ok_or(PublicError::InvalidFormat)?; - let (addr, v) = Self::from_ss58check_with_version( - cap.name("ss58").map(|r| r.as_str()).unwrap_or(DEV_ADDRESS), - )?; - if cap["path"].is_empty() { + let cap = AddressUri::parse(s)?; + if cap.pass.is_some() { + return Err(PublicError::PasswordNotAllowed); + } + let (addr, v) = Self::from_ss58check_with_version(cap.phrase.unwrap_or(DEV_ADDRESS))?; + if cap.paths.is_empty() { Ok((addr, v)) } else { - let path = - JUNCTION_REGEX.captures_iter(&cap["path"]).map(|f| DeriveJunction::from(&f[1])); - addr.derive(path).ok_or(PublicError::InvalidPath).map(|a| (a, v)) + addr.derive(cap.paths.iter().map(DeriveJunction::from)) + .ok_or(PublicError::InvalidPath) + .map(|a| (a, v)) } } } @@ -817,22 +825,15 @@ impl sp_std::str::FromStr for SecretUri { type Err = SecretStringError; fn from_str(s: &str) -> Result { - let cap = SECRET_PHRASE_REGEX.captures(s).ok_or(SecretStringError::InvalidFormat)?; - - let junctions = JUNCTION_REGEX - .captures_iter(&cap["path"]) - .map(|f| DeriveJunction::from(&f[1])) - .collect::>(); - - let phrase = cap.name("phrase").map(|r| r.as_str()).unwrap_or(DEV_PHRASE); - let password = cap.name("password"); + let cap = AddressUri::parse(s)?; + let phrase = cap.phrase.unwrap_or(DEV_PHRASE); Ok(Self { phrase: SecretString::from_str(phrase).expect("Returns infallible error; qed"), - password: password.map(|v| { - SecretString::from_str(v.as_str()).expect("Returns infallible error; qed") - }), - junctions, + password: cap + .pass + .map(|v| SecretString::from_str(v).expect("Returns infallible error; qed")), + junctions: cap.paths.iter().map(DeriveJunction::from).collect::>(), }) } } diff --git a/substrate/primitives/core/src/lib.rs b/substrate/primitives/core/src/lib.rs index ec0641c5466..4873d1a2112 100644 --- a/substrate/primitives/core/src/lib.rs +++ b/substrate/primitives/core/src/lib.rs @@ -55,6 +55,8 @@ pub mod crypto; pub mod hexdisplay; pub use paste; +#[cfg(any(feature = "full_crypto", feature = "std"))] +mod address_uri; #[cfg(feature = "bandersnatch-experimental")] pub mod bandersnatch; #[cfg(feature = "bls-experimental")] -- GitLab From 490fb66537c60e1a391350b3f0a786efb7c17373 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Fri, 17 Nov 2023 16:40:49 +0200 Subject: [PATCH 58/74] [trivial] asset-hubs runtimes: fix incorrect doc-comments (#2384) Fix some incorrect doc-comments --- .../runtimes/assets/asset-hub-rococo/src/xcm_config.rs | 4 ++-- .../runtimes/assets/asset-hub-westend/src/xcm_config.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs index b85cb76642f..6e04924f038 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs @@ -109,7 +109,7 @@ pub type CurrencyTransactor = CurrencyAdapter< (), >; -/// `AssetId`/`Balance` converter for `PoolAssets`. +/// `AssetId`/`Balance` converter for `TrustBackedAssets`. 
pub type TrustBackedAssetsConvertedConcreteId = assets_common::TrustBackedAssetsConvertedConcreteId; @@ -130,7 +130,7 @@ pub type FungiblesTransactor = FungiblesAdapter< CheckingAccount, >; -/// `AssetId/Balance` converter for `TrustBackedAssets` +/// `AssetId`/`Balance` converter for `ForeignAssets`. pub type ForeignAssetsConvertedConcreteId = assets_common::ForeignAssetsConvertedConcreteId< ( // Ignore `TrustBackedAssets` explicitly diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs index 17312c0f46e..6942559671b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs @@ -109,7 +109,7 @@ pub type CurrencyTransactor = CurrencyAdapter< (), >; -/// `AssetId/Balance` converter for `TrustBackedAssets` +/// `AssetId`/`Balance` converter for `TrustBackedAssets`. pub type TrustBackedAssetsConvertedConcreteId = assets_common::TrustBackedAssetsConvertedConcreteId; @@ -130,7 +130,7 @@ pub type FungiblesTransactor = FungiblesAdapter< CheckingAccount, >; -/// `AssetId/Balance` converter for `TrustBackedAssets` +/// `AssetId`/`Balance` converter for `ForeignAssets`. pub type ForeignAssetsConvertedConcreteId = assets_common::ForeignAssetsConvertedConcreteId< ( // Ignore `TrustBackedAssets` explicitly -- GitLab From db85616c432c191ea8fdf8b45d66a102cf19c8d2 Mon Sep 17 00:00:00 2001 From: eskimor Date: Fri, 17 Nov 2023 15:53:32 +0100 Subject: [PATCH 59/74] Make Westend and Rococo compile. --- .../parachains/src/assigner_bulk/mod.rs | 35 +----------- polkadot/runtime/rococo/src/lib.rs | 13 ++--- polkadot/runtime/rococo/src/xcm_config.rs | 2 +- polkadot/runtime/westend/src/lib.rs | 53 ++++++++++++++++++- polkadot/runtime/westend/src/weights/mod.rs | 1 + 5 files changed, 58 insertions(+), 46 deletions(-) diff --git a/polkadot/runtime/parachains/src/assigner_bulk/mod.rs b/polkadot/runtime/parachains/src/assigner_bulk/mod.rs index c6670215cf2..7b21614de11 100644 --- a/polkadot/runtime/parachains/src/assigner_bulk/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_bulk/mod.rs @@ -178,12 +178,6 @@ pub mod pallet { pub trait Config: frame_system::Config + configuration::Config + paras::Config + assigner_on_demand::Config { - /// Something that provides the weight of this pallet. - type WeightInfo: WeightInfo; - - /// Origin from which coretime extrinsics may be called. This is generally the Broker - /// system parachain. - type ExternalBrokerOrigin: EnsureOrigin; } /// Scheduled assignment sets. @@ -216,33 +210,6 @@ pub mod pallet { #[pallet::hooks] impl Hooks> for Pallet {} - /// Receive instructions from the `ExternalBrokerOrigin`, detailing how a specific core is to be - /// used. - /// - /// Parameters: - /// -`origin`: The `ExternalBrokerOrigin`, assumed to be the Broker system parachain. - /// -`core`: The core that should be scheduled. - /// -`begin`: The starting blockheight of the instruction. - /// -`assignment`: How the blockspace should be utilised. - /// -`end_hint`: An optional hint as to when this particular set of instructions will end. - #[pallet::call] - impl Pallet { - //TODO: Weights - #[pallet::call_index(0)] - pub fn assign_core( - origin: OriginFor, - core: CoreIndex, - begin: BlockNumberFor, - assignment: Vec<(CoreAssignment, PartsOf57600)>, - end_hint: Option>, - ) -> DispatchResult { - // Ignore requests not coming from the External Broker parachain. 
- let _multi_location = ::ExternalBrokerOrigin::ensure_origin(origin)?; - - Pallet::::do_assign_core(core, begin, assignment, end_hint) - } - } - #[pallet::error] pub enum Error { AssignmentsEmpty, @@ -435,7 +402,7 @@ impl Pallet { /// The problem is that insertion complexity then depends on the size of the existing queue, /// which makes determining weights hard and could lead to issues like overweight blocks (at /// least in theory). - pub fn do_assign_core( + pub fn assign_core( core_idx: CoreIndex, begin: BlockNumberFor, assignments: Vec<(CoreAssignment, PartsOf57600)>, diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index ffd70795a20..2cdb2ef391e 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -21,7 +21,6 @@ #![recursion_limit = "512"] use pallet_nis::WithMaximumOf; -use pallet_xcm::EnsureXcm; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use primitives::{ slashing, AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CandidateHash, @@ -121,7 +120,6 @@ use governance::{ pallet_custom_origins, AuctionAdmin, Fellows, GeneralAdmin, LeaseAdmin, Treasurer, TreasurySpender, }; -use xcm_config::Broker; #[cfg(test)] mod tests; @@ -977,12 +975,8 @@ impl parachains_assigner_on_demand::Config for Runtime { type TrafficDefaultValue = OnDemandTrafficDefaultValue; type WeightInfo = weights::runtime_parachains_assigner_on_demand::WeightInfo; } -impl parachains_assigner_bulk::Config for Runtime { - // FIXME: Proper weights: - type WeightInfo = (); - // type WeightInfo = weights::runtime_parachains_assigner_bulk::WeightInfo; - type ExternalBrokerOrigin = EnsureXcm; -} + +impl parachains_assigner_bulk::Config for Runtime {} impl parachains_assigner_parachains::Config for Runtime {} @@ -1368,6 +1362,7 @@ construct_runtime! { ParaAssignmentProvider: parachains_assigner_v1::{Pallet, Storage} = 65, OnDemandAssignmentProvider: parachains_assigner_on_demand::{Pallet, Call, Storage, Event} = 66, ParachainsAssignmentProvider: parachains_assigner_parachains::{Pallet} = 67, + CoreTimeAssignmentProvider: parachains_assigner_bulk::{Pallet} = 68, // Parachain Onboarding Pallets. Start indices at 70 to leave room. Registrar: paras_registrar::{Pallet, Call, Storage, Event, Config} = 70, @@ -1512,7 +1507,7 @@ pub mod migrations { ); /// We are swapping out the assignment type in the scheduler for coretime. 
- struct SchedulerAssignmentMigration(sp_std::marker::PhantomData); + pub struct SchedulerAssignmentMigration(sp_std::marker::PhantomData); impl parachains_scheduler::migration::assignment_version::AssignmentMigration for SchedulerAssignmentMigration { diff --git a/polkadot/runtime/rococo/src/xcm_config.rs b/polkadot/runtime/rococo/src/xcm_config.rs index eab3a3a42f2..9a0e02ddc1a 100644 --- a/polkadot/runtime/rococo/src/xcm_config.rs +++ b/polkadot/runtime/rococo/src/xcm_config.rs @@ -25,7 +25,7 @@ use crate::governance::StakingAdmin; use frame_support::{ match_types, parameter_types, - traits::{Contains, Everything, Nothing}, + traits::{Everything, Nothing}, weights::Weight, }; use frame_system::EnsureRoot; diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 911ffef34d8..91ed73f4c0f 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -64,6 +64,8 @@ use runtime_common::{ BlockLength, CurrencyToVote, SlowAdjustingFeeUpdate, U256ToBalance, }; use runtime_parachains::{ + assigner::v1 as parachains_assigner_v1, + assigner_bulk as parachains_assigner_bulk, assigner_on_demand as parachains_assigner_on_demand, assigner_parachains as parachains_assigner_parachains, configuration as parachains_configuration, disputes as parachains_disputes, disputes::slashing as parachains_slashing, @@ -1211,8 +1213,23 @@ impl parachains_scheduler::Config for Runtime { type AssignmentProvider = ParaAssignmentProvider; } +parameter_types! { + pub const OnDemandTrafficDefaultValue: FixedU128 = FixedU128::from_u32(1); +} + +impl parachains_assigner_on_demand::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type TrafficDefaultValue = OnDemandTrafficDefaultValue; + type WeightInfo = weights::runtime_parachains_assigner_on_demand::WeightInfo; +} + +impl parachains_assigner_bulk::Config for Runtime {} + impl parachains_assigner_parachains::Config for Runtime {} +impl parachains_assigner_v1::Config for Runtime {} + impl parachains_initializer::Config for Runtime { type Randomness = pallet_babe::RandomnessFromOneEpochAgo; type ForceOrigin = EnsureRoot; @@ -1472,7 +1489,12 @@ construct_runtime! { ParaSessionInfo: parachains_session_info::{Pallet, Storage} = 52, ParasDisputes: parachains_disputes::{Pallet, Call, Storage, Event} = 53, ParasSlashing: parachains_slashing::{Pallet, Call, Storage, ValidateUnsigned} = 54, - ParaAssignmentProvider: parachains_assigner_parachains::{Pallet, Storage} = 55, + // TODO: I just swapped out the pallet here (keeping the same number). I assume this is + // fine as we are not exposing any calls, events nor origins, but I am not 100% sure. + ParaAssignmentProvider: parachains_assigner_v1::{Pallet, Storage} = 55, + OnDemandAssignmentProvider: parachains_assigner_on_demand::{Pallet, Call, Storage, Event} = 56, + ParachainsAssignmentProvider: parachains_assigner_parachains::{Pallet} = 57, + CoreTimeAssignmentProvider: parachains_assigner_bulk::{Pallet} = 58, // Parachain Onboarding Pallets. Start indices at 60 to leave room. Registrar: paras_registrar::{Pallet, Call, Storage, Event, Config} = 60, @@ -1536,6 +1558,9 @@ pub type Migrations = migrations::Unreleased; pub mod migrations { use super::*; + use parachains_scheduler::common::AssignmentVersion; + use primitives::CoreIndex; + /// Upgrade Session keys to include BEEFY key. /// When this is removed, should also remove `OldSessionKeys`. 
pub struct UpgradeSessionKeys; @@ -1554,7 +1579,10 @@ pub mod migrations { assigned_slots::migration::v1::VersionCheckedMigrateToV1, parachains_scheduler::migration::v1::MigrateToV1, parachains_scheduler::migration::v2::MigrateToV2, - parachains_scheduler::migration::assignment_version::MigrateAssignment, + parachains_scheduler::migration::assignment_version::MigrateAssignment< + Runtime, + SchedulerAssignmentMigration, + >, parachains_configuration::migration::v8::MigrateToV8, UpgradeSessionKeys, parachains_configuration::migration::v9::MigrateToV9, @@ -1563,6 +1591,27 @@ pub mod migrations { pallet_referenda::migration::v1::MigrateV0ToV1, pallet_nomination_pools::migration::versioned_migrations::V6ToV7, ); + + /// We are swapping out the assignment type in the scheduler for coretime. + pub struct SchedulerAssignmentMigration(sp_std::marker::PhantomData); + impl parachains_scheduler::migration::assignment_version::AssignmentMigration + for SchedulerAssignmentMigration + { + const ON_CHAIN_STORAGE_VERSION: AssignmentVersion = AssignmentVersion::new(0); + const STORAGE_VERSION: AssignmentVersion = AssignmentVersion::new(1); + + type OldType = parachains_scheduler::common::V0Assignment; + type NewType = parachains_assigner_v1::UnifiedAssignmentType; + + fn migrate(core_idx: CoreIndex, old: Self::OldType) -> Self::NewType { + // While previously the plain legacy assigner was in place (as opposed to v0 top-level + // assigner as on Rococo), the assignment format changed exactly the same, so the v0 to + // v1 migration of the top-level assigner fits the bill here as well. + // + // The same is true for Kusama and Polkadot later. + parachains_assigner_v1::migrate_assignment_v0_to_v1::(old, core_idx) + } + } } /// Unchecked extrinsic type as expected by this runtime. diff --git a/polkadot/runtime/westend/src/weights/mod.rs b/polkadot/runtime/westend/src/weights/mod.rs index 9ae6798d70b..0268190b985 100644 --- a/polkadot/runtime/westend/src/weights/mod.rs +++ b/polkadot/runtime/westend/src/weights/mod.rs @@ -48,6 +48,7 @@ pub mod runtime_common_auctions; pub mod runtime_common_crowdloan; pub mod runtime_common_paras_registrar; pub mod runtime_common_slots; +pub mod runtime_parachains_assigner_on_demand; pub mod runtime_parachains_configuration; pub mod runtime_parachains_disputes; pub mod runtime_parachains_disputes_slashing; -- GitLab From 079b14f624bef9dd901f93c29424cba0ebbd325d Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Fri, 17 Nov 2023 18:01:08 +0200 Subject: [PATCH 60/74] Do not panic if the `fdlimit` call to increase the file descriptor limit fails (#2155) # Description Sometimes changing file descriptor limits is not allowed, but there is no need to crash the node if/when this happens. Since `fdlimit`'s author decided to use panics instead of returning `Result`, we need to catch it. 
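Example of the resulting behaviour (an illustrative sketch only — the helper name and the `eprintln!` logging are stand-ins for the node's `warn!` calls; the enum variants mirror the `fdlimit` 0.3 API used in the diff below):

```rust
// Minimal sketch: handle the Result returned by fdlimit 0.3.x instead of panicking.
fn raise_fd_limit_best_effort() {
    match fdlimit::raise_fd_limit() {
        // The soft limit was raised; `to` is the new value.
        Ok(fdlimit::Outcome::LimitRaised { to, .. }) =>
            eprintln!("file descriptor limit raised to {to}"),
        // Unsupported platform (non-Linux): nothing to do.
        Ok(fdlimit::Outcome::Unsupported) => {},
        // Raising the limit was not permitted: log it and keep the node running.
        Err(error) => eprintln!("failed to raise the file descriptor limit: {error}"),
    }
}
```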
# Checklist - [x] My PR includes a detailed description as outlined in the "Description" section above - [ ] My PR follows the [labeling requirements](CONTRIBUTING.md#Process) of this project (at minimum one label for `T` required) - [ ] I have made corresponding changes to the documentation (if applicable) - [ ] I have added tests that prove my fix is effective or that my feature works (if applicable) --------- Co-authored-by: Koute --- Cargo.lock | 13 +++++++------ substrate/client/cli/Cargo.toml | 2 +- substrate/client/cli/src/config.rs | 23 +++++++++++++++++------ substrate/client/service/test/Cargo.toml | 2 +- substrate/client/service/test/src/lib.rs | 2 +- 5 files changed, 27 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 64da4b8996c..da266eb5d47 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5342,11 +5342,12 @@ dependencies = [ [[package]] name = "fdlimit" -version = "0.2.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c4c9e43643f5a3be4ca5b67d26b98031ff9db6806c3440ae32e02e3ceac3f1b" +checksum = "e182f7dbc2ef73d9ef67351c5fbbea084729c48362d3ce9dd44c28e32e277fe5" dependencies = [ "libc", + "thiserror", ] [[package]] @@ -19321,9 +19322,9 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" -version = "1.0.48" +version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d6d7a740b8a666a7e828dd00da9c0dc290dff53154ea77ac109281de90589b7" +checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" dependencies = [ "thiserror-impl", ] @@ -19350,9 +19351,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "1.0.48" +version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" +checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" dependencies = [ "proc-macro2", "quote", diff --git a/substrate/client/cli/Cargo.toml b/substrate/client/cli/Cargo.toml index c415527c372..c4464c5f787 100644 --- a/substrate/client/cli/Cargo.toml +++ b/substrate/client/cli/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] array-bytes = "6.1" chrono = "0.4.27" clap = { version = "4.4.6", features = ["derive", "string", "wrap_help"] } -fdlimit = "0.2.1" +fdlimit = "0.3.0" futures = "0.3.21" itertools = "0.10.3" libp2p-identity = { version = "0.1.3", features = ["peerid", "ed25519"]} diff --git a/substrate/client/cli/src/config.rs b/substrate/client/cli/src/config.rs index 4d218da6aa8..b842df5a690 100644 --- a/substrate/client/cli/src/config.rs +++ b/substrate/client/cli/src/config.rs @@ -605,14 +605,25 @@ pub trait CliConfiguration: Sized { logger.init()?; - if let Some(new_limit) = fdlimit::raise_fd_limit() { - if new_limit < RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT { + match fdlimit::raise_fd_limit() { + Ok(fdlimit::Outcome::LimitRaised { to, .. }) => + if to < RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT { + warn!( + "Low open file descriptor limit configured for the process. \ + Current value: {:?}, recommended value: {:?}.", + to, RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT, + ); + }, + Ok(fdlimit::Outcome::Unsupported) => { + // Unsupported platform (non-Linux) + }, + Err(error) => { warn!( - "Low open file descriptor limit configured for the process. 
\ - Current value: {:?}, recommended value: {:?}.", - new_limit, RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT, + "Failed to configure file descriptor limit for the process: \ + {}, recommended value: {:?}.", + error, RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT, ); - } + }, } Ok(()) diff --git a/substrate/client/service/test/Cargo.toml b/substrate/client/service/test/Cargo.toml index 670312e4161..c6091f97d63 100644 --- a/substrate/client/service/test/Cargo.toml +++ b/substrate/client/service/test/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-channel = "1.8.0" array-bytes = "6.1" -fdlimit = "0.2.1" +fdlimit = "0.3.0" futures = "0.3.21" log = "0.4.17" parity-scale-codec = "3.6.1" diff --git a/substrate/client/service/test/src/lib.rs b/substrate/client/service/test/src/lib.rs index 9700c7643c4..456df73459a 100644 --- a/substrate/client/service/test/src/lib.rs +++ b/substrate/client/service/test/src/lib.rs @@ -285,7 +285,7 @@ where base_port: u16, ) -> TestNet { sp_tracing::try_init_simple(); - fdlimit::raise_fd_limit(); + fdlimit::raise_fd_limit().unwrap(); let runtime = Runtime::new().expect("Error creating tokio runtime"); let mut net = TestNet { runtime, -- GitLab From 0385902cd05523155832b155fe7ce4d1eaf7b779 Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Fri, 17 Nov 2023 17:06:45 +0100 Subject: [PATCH 61/74] Relax `force_default_xcm_version` for testnet system parachains (#2385) This PR fixes two things: - relax `force_default_xcm_version` for testnet system parachains (e.g. BridgeHubWestend has now 2 and there is no way to change it to 3, so we need to call `force_xcm_version(3)` for every parachain that it is connected to, because we send XCMv3 messages) - add `Storage` item to `PolkadotXcm` pallet definition (now we cannot see storage items for `pallet_xcm` in PJS) ## TODO - [ ] when merged open PR to `polkadot-fellows/runtimes` repo --- .../assets/asset-hub-rococo/src/xcm_config.rs | 16 +++++++++------- .../assets/asset-hub-westend/src/xcm_config.rs | 16 +++++++++------- .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 2 +- .../bridge-hub-rococo/src/xcm_config.rs | 16 +++++++++------- .../bridge-hubs/bridge-hub-westend/src/lib.rs | 2 +- .../bridge-hub-westend/src/xcm_config.rs | 16 +++++++++------- .../collectives-westend/src/xcm_config.rs | 6 ++++-- 7 files changed, 42 insertions(+), 32 deletions(-) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs index 6e04924f038..4da0a2500a5 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs @@ -275,13 +275,15 @@ impl Contains for SafeCallFilter { matches!( call, - RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) | - RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | + RuntimeCall::PolkadotXcm( + pallet_xcm::Call::force_xcm_version { .. } | + pallet_xcm::Call::force_default_xcm_version { .. } + ) | RuntimeCall::System( + frame_system::Call::set_heap_pages { .. } | + frame_system::Call::set_code { .. } | + frame_system::Call::set_code_without_checks { .. } | + frame_system::Call::kill_prefix { .. }, + ) | RuntimeCall::ParachainSystem(..) | RuntimeCall::Timestamp(..) | RuntimeCall::Balances(..) 
| RuntimeCall::CollatorSelection( diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs index 6942559671b..4760e087e24 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs @@ -272,13 +272,15 @@ impl Contains for SafeCallFilter { matches!( call, - RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) | - RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | + RuntimeCall::PolkadotXcm( + pallet_xcm::Call::force_xcm_version { .. } | + pallet_xcm::Call::force_default_xcm_version { .. } + ) | RuntimeCall::System( + frame_system::Call::set_heap_pages { .. } | + frame_system::Call::set_code { .. } | + frame_system::Call::set_code_without_checks { .. } | + frame_system::Call::kill_prefix { .. }, + ) | RuntimeCall::ParachainSystem(..) | RuntimeCall::Timestamp(..) | RuntimeCall::Balances(..) | RuntimeCall::CollatorSelection( diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 5a44ccbb75a..8e138822696 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -498,7 +498,7 @@ construct_runtime!( // XCM helpers. XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 30, - PolkadotXcm: pallet_xcm::{Pallet, Call, Event, Origin, Config} = 31, + PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 31, CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs index 1436c5b96a3..de7b5315c88 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs @@ -161,13 +161,15 @@ impl Contains for SafeCallFilter { matches!( call, - RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) | - RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | + RuntimeCall::PolkadotXcm( + pallet_xcm::Call::force_xcm_version { .. } | + pallet_xcm::Call::force_default_xcm_version { .. } + ) | RuntimeCall::System( + frame_system::Call::set_heap_pages { .. } | + frame_system::Call::set_code { .. } | + frame_system::Call::set_code_without_checks { .. } | + frame_system::Call::kill_prefix { .. }, + ) | RuntimeCall::ParachainSystem(..) | RuntimeCall::Timestamp(..) | RuntimeCall::Balances(..) 
| RuntimeCall::CollatorSelection( diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index d1d2b4a4159..9c97728058f 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -497,7 +497,7 @@ construct_runtime!( // XCM helpers. XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 30, - PolkadotXcm: pallet_xcm::{Pallet, Call, Event, Origin, Config} = 31, + PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 31, CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs index 7084882c41f..c89ee91c5e4 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs @@ -150,13 +150,15 @@ impl Contains for SafeCallFilter { matches!( call, - RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) | - RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | + RuntimeCall::PolkadotXcm( + pallet_xcm::Call::force_xcm_version { .. } | + pallet_xcm::Call::force_default_xcm_version { .. } + ) | RuntimeCall::System( + frame_system::Call::set_heap_pages { .. } | + frame_system::Call::set_code { .. } | + frame_system::Call::set_code_without_checks { .. } | + frame_system::Call::kill_prefix { .. }, + ) | RuntimeCall::ParachainSystem(..) | RuntimeCall::Timestamp(..) | RuntimeCall::Balances(..) | RuntimeCall::CollatorSelection( diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs index d58995827fa..cefc099c96f 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs @@ -178,8 +178,10 @@ impl Contains for SafeCallFilter { pallet_collator_selection::Call::add_invulnerable { .. } | pallet_collator_selection::Call::remove_invulnerable { .. }, ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | - RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) | - RuntimeCall::XcmpQueue(..) | + RuntimeCall::PolkadotXcm( + pallet_xcm::Call::force_xcm_version { .. } | + pallet_xcm::Call::force_default_xcm_version { .. } + ) | RuntimeCall::XcmpQueue(..) | RuntimeCall::MessageQueue(..) | RuntimeCall::Alliance( // `init_members` accepts unbounded vecs as arguments, -- GitLab From 9e34163aca9d58d7351451833d9cd214b266e7f0 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Fri, 17 Nov 2023 17:10:46 +0100 Subject: [PATCH 62/74] Make collator RPC mode non-experimental (#2381) The `--relay-chain-rpc-urls` CLI flag has been available for a while now. We have collators with this running and parachain teams are also using it. It should be fine now to remove the experimental status. 
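Example invocation (hypothetical — the binary name and endpoints are placeholders; as documented in the diff below, the flag takes one or more relay chain RPC endpoints and the node falls back to the next one if a connection fails):

```bash
# Placeholder collator binary and relay chain RPC endpoints.
polkadot-parachain \
  --collator \
  --relay-chain-rpc-urls wss://relay-rpc-1.example:443 wss://relay-rpc-2.example:443
```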
--- cumulus/client/cli/src/lib.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/cumulus/client/cli/src/lib.rs b/cumulus/client/cli/src/lib.rs index 1b18ed06437..a2238b73b2b 100644 --- a/cumulus/client/cli/src/lib.rs +++ b/cumulus/client/cli/src/lib.rs @@ -296,7 +296,14 @@ pub struct RunCmd { #[arg(long, conflicts_with = "validator")] pub collator: bool, - /// EXPERIMENTAL: Specify an URL to a relay chain full node to communicate with. + /// Creates a less resource-hungry node that retrieves relay chain data from an RPC endpoint. + /// + /// The provided URLs should point to RPC endpoints of the relay chain. + /// This node connects to the remote nodes following the order they were specified in. If the + /// connection fails, it attempts to connect to the next endpoint in the list. + /// + /// Note: This option doesn't stop the node from connecting to the relay chain network but + /// reduces bandwidth use. #[arg( long, value_parser = validate_relay_chain_url, -- GitLab From 82912acb33a9030c0ef3bf590a34fca09b72dc5f Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Fri, 17 Nov 2023 20:14:45 +0400 Subject: [PATCH 63/74] Fix migrations and add CI check for new system chains (#2336) Westend Collectives migration CI check can be fixed once we have https://github.com/paritytech/try-runtime-cli/pull/58, will open another PR once it is available. - [x] Remove deprecated `DmpQueue` pallet from Rococo Contracts, the migration is complete - [x] Fix Asset Hub Rococo storage versions - [x] Add migration check CI for Asset Hub Rococo and Westend Bridge Hub --- .gitlab/pipeline/check.yml | 25 ++++++++ .../assets/asset-hub-rococo/src/lib.rs | 59 ++++++++++++++++++- .../contracts/contracts-rococo/src/lib.rs | 1 - .../contracts-rococo/src/xcm_config.rs | 6 -- 4 files changed, 82 insertions(+), 9 deletions(-) diff --git a/.gitlab/pipeline/check.yml b/.gitlab/pipeline/check.yml index 4071fdf9758..429491fb174 100644 --- a/.gitlab/pipeline/check.yml +++ b/.gitlab/pipeline/check.yml @@ -151,6 +151,31 @@ check-runtime-migration-asset-hub-westend: PACKAGE: "asset-hub-westend-runtime" WASM: "asset_hub_westend_runtime.compact.compressed.wasm" URI: "wss://westend-asset-hub-rpc.polkadot.io:443" + +check-runtime-migration-asset-hub-rococo: + stage: check + extends: + - .docker-env + - .test-pr-refs + - .check-runtime-migration + variables: + NETWORK: "asset-hub-rococo" + PACKAGE: "asset-hub-rococo-runtime" + WASM: "asset_hub_rococo_runtime.compact.compressed.wasm" + URI: "wss://rococo-asset-hub-rpc.polkadot.io:443" + +# Check runtime migrations for Parity managed bridge hub chains +check-runtime-migration-bridge-hub-westend: + stage: check + extends: + - .docker-env + - .test-pr-refs + - .check-runtime-migration + variables: + NETWORK: "bridge-hub-westend" + PACKAGE: "bridge-hub-westend-runtime" + WASM: "bridge_hub_westend_runtime.compact.compressed.wasm" + URI: "wss://westend-bridge-hub-rpc.polkadot.io:443" check-runtime-migration-bridge-hub-rococo: stage: check diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 4b4ae61a3e8..b274f45877b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -39,7 +39,9 @@ use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdConversion, AccountIdLookup, 
BlakeTwo256, Block as BlockT, Verify}, + traits::{ + AccountIdConversion, AccountIdLookup, BlakeTwo256, Block as BlockT, Saturating, Verify, + }, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, Permill, }; @@ -959,7 +961,60 @@ pub type SignedExtra = ( pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. -pub type Migrations = (pallet_collator_selection::migration::v1::MigrateToV1,); +pub type Migrations = + (pallet_collator_selection::migration::v1::MigrateToV1, InitStorageVersions); + +/// Migration to initialize storage versions for pallets added after genesis. +/// +/// This is now done automatically (see ), +/// but some pallets had made it in and had storage set in them for this parachain before it was +/// merged. +pub struct InitStorageVersions; + +impl frame_support::traits::OnRuntimeUpgrade for InitStorageVersions { + fn on_runtime_upgrade() -> Weight { + use frame_support::traits::{GetStorageVersion, StorageVersion}; + + let mut writes = 0; + + if PolkadotXcm::on_chain_storage_version() == StorageVersion::new(0) { + PolkadotXcm::current_storage_version().put::(); + writes.saturating_inc(); + } + + if Multisig::on_chain_storage_version() == StorageVersion::new(0) { + Multisig::current_storage_version().put::(); + writes.saturating_inc(); + } + + if Assets::on_chain_storage_version() == StorageVersion::new(0) { + Assets::current_storage_version().put::(); + writes.saturating_inc(); + } + + if Uniques::on_chain_storage_version() == StorageVersion::new(0) { + Uniques::current_storage_version().put::(); + writes.saturating_inc(); + } + + if Nfts::on_chain_storage_version() == StorageVersion::new(0) { + Nfts::current_storage_version().put::(); + writes.saturating_inc(); + } + + if ForeignAssets::on_chain_storage_version() == StorageVersion::new(0) { + ForeignAssets::current_storage_version().put::(); + writes.saturating_inc(); + } + + if PoolAssets::on_chain_storage_version() == StorageVersion::new(0) { + PoolAssets::current_storage_version().put::(); + writes.saturating_inc(); + } + + ::DbWeight::get().reads_writes(7, writes) + } +} /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index 5b828bad0c7..9d6a53c5ed3 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -405,7 +405,6 @@ construct_runtime!( XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 30, PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 31, CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, - DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 34, // Smart Contracts. diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs index faee1c68fe6..2ac93aed3f8 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs @@ -304,9 +304,3 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { parameter_types! 
{ pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } - -impl cumulus_pallet_dmp_queue::Config for Runtime { - type WeightInfo = cumulus_pallet_dmp_queue::weights::SubstrateWeight; - type RuntimeEvent = crate::RuntimeEvent; - type DmpSink = frame_support::traits::EnqueueWithOrigin; -} -- GitLab From 1d1c3719487c8b9f9b770e5c33e8e8b237bd8824 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Fri, 17 Nov 2023 23:48:34 +0100 Subject: [PATCH 64/74] Bump bandersnatch VRF revision (#2389) Closes https://github.com/paritytech/polkadot-sdk/issues/2327 cc @burdges --- Cargo.lock | 40 ++++++++++++++-------------- substrate/primitives/core/Cargo.toml | 2 +- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index da266eb5d47..ed9997101fb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -544,7 +544,7 @@ dependencies = [ [[package]] name = "ark-secret-scalar" version = "0.0.2" -source = "git+https://github.com/w3f/ring-vrf?rev=cbc342e#cbc342e95d3cbcd3c5ba8d45af7200eb58e63502" +source = "git+https://github.com/w3f/ring-vrf?rev=3ddc205#3ddc2051066c4b3f0eadd0ba5700df12500d9754" dependencies = [ "ark-ec", "ark-ff", @@ -593,7 +593,7 @@ dependencies = [ [[package]] name = "ark-transcript" version = "0.0.2" -source = "git+https://github.com/w3f/ring-vrf?rev=cbc342e#cbc342e95d3cbcd3c5ba8d45af7200eb58e63502" +source = "git+https://github.com/w3f/ring-vrf?rev=3ddc205#3ddc2051066c4b3f0eadd0ba5700df12500d9754" dependencies = [ "ark-ff", "ark-serialize", @@ -1369,8 +1369,8 @@ dependencies = [ [[package]] name = "bandersnatch_vrfs" -version = "0.0.3" -source = "git+https://github.com/w3f/ring-vrf?rev=cbc342e#cbc342e95d3cbcd3c5ba8d45af7200eb58e63502" +version = "0.0.4" +source = "git+https://github.com/w3f/ring-vrf?rev=3ddc205#3ddc2051066c4b3f0eadd0ba5700df12500d9754" dependencies = [ "ark-bls12-381", "ark-ec", @@ -1493,8 +1493,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93f2635620bf0b9d4576eb7bb9a38a55df78bd1205d26fa994b25911a69f212f" dependencies = [ "bitcoin_hashes", - "rand 0.8.5", - "rand_core 0.6.4", + "rand 0.7.3", + "rand_core 0.5.1", "serde", "unicode-normalization", ] @@ -3065,7 +3065,7 @@ dependencies = [ [[package]] name = "common" version = "0.1.0" -source = "git+https://github.com/w3f/ring-proof#edd1e90b847e560bf60fc2e8712235ccfa11a9a9" +source = "git+https://github.com/burdges/ring-proof?branch=patch-1#05a756076cb20f981a52afea3a620168de49f95f" dependencies = [ "ark-ec", "ark-ff", @@ -4814,7 +4814,7 @@ checksum = "86e3bdc80eee6e16b2b6b0f87fbc98c04bee3455e35174c0de1a125d0688c632" [[package]] name = "dleq_vrf" version = "0.0.2" -source = "git+https://github.com/w3f/ring-vrf?rev=cbc342e#cbc342e95d3cbcd3c5ba8d45af7200eb58e63502" +source = "git+https://github.com/w3f/ring-vrf?rev=3ddc205#3ddc2051066c4b3f0eadd0ba5700df12500d9754" dependencies = [ "ark-ec", "ark-ff", @@ -5389,7 +5389,7 @@ dependencies = [ [[package]] name = "fflonk" version = "0.1.0" -source = "git+https://github.com/w3f/fflonk#26a5045b24e169cffc1f9328ca83d71061145c40" +source = "git+https://github.com/w3f/fflonk#1beb0585e1c8488956fac7f05da061f9b41e8948" dependencies = [ "ark-ec", "ark-ff", @@ -14445,7 +14445,7 @@ dependencies = [ [[package]] name = "ring" version = "0.1.0" -source = "git+https://github.com/w3f/ring-proof#edd1e90b847e560bf60fc2e8712235ccfa11a9a9" +source = "git+https://github.com/burdges/ring-proof?branch=patch-1#05a756076cb20f981a52afea3a620168de49f95f" dependencies = [ "ark-ec", "ark-ff", @@ -17607,7 +17607,7 @@ dependencies = 
[ [[package]] name = "sp-crypto-ec-utils" version = "0.4.1" -source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ "ark-bls12-377", "ark-bls12-377-ext", @@ -17645,7 +17645,7 @@ dependencies = [ [[package]] name = "sp-debug-derive" version = "8.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ "proc-macro2", "quote", @@ -17665,7 +17665,7 @@ dependencies = [ [[package]] name = "sp-externalities" version = "0.19.0" -source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ "environmental", "parity-scale-codec", @@ -17897,7 +17897,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "17.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -17926,7 +17926,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "11.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ "Inflector", "proc-macro-crate", @@ -18054,7 +18054,7 @@ version = "8.0.0" [[package]] name = "sp-std" version = "8.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" [[package]] name = "sp-storage" @@ -18071,7 +18071,7 @@ dependencies = [ [[package]] name = "sp-storage" version = "13.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ "impl-serde", "parity-scale-codec", @@ -18120,7 +18120,7 @@ dependencies = [ [[package]] name = "sp-tracing" version = "10.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ "parity-scale-codec", "sp-std 8.0.0 (git+https://github.com/paritytech/polkadot-sdk)", @@ -18221,7 +18221,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" version = "14.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ "anyhow", "impl-trait-for-tuples", @@ -20008,7 +20008,7 @@ checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ "cfg-if", "digest 0.10.7", - "rand 0.8.5", + "rand 0.7.3", "static_assertions", ] diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml index 9ecce0a22f5..25478bed2d9 100644 --- a/substrate/primitives/core/Cargo.toml +++ b/substrate/primitives/core/Cargo.toml @@ 
-56,7 +56,7 @@ sp-runtime-interface = { path = "../runtime-interface", default-features = false # bls crypto w3f-bls = { version = "0.1.3", default-features = false, optional = true} # bandersnatch crypto -bandersnatch_vrfs = { git = "https://github.com/w3f/ring-vrf", rev = "cbc342e", default-features = false, optional = true } +bandersnatch_vrfs = { git = "https://github.com/w3f/ring-vrf", rev = "3ddc205", default-features = false, optional = true } [dev-dependencies] criterion = "0.4.0" -- GitLab From ac93c7be92c0231d048e2e35c5e8256e3ee86b7f Mon Sep 17 00:00:00 2001 From: eskimor Date: Sat, 18 Nov 2023 08:18:58 +0100 Subject: [PATCH 65/74] Fix mock. --- polkadot/runtime/parachains/src/mock.rs | 25 ++++++------------------- 1 file changed, 6 insertions(+), 19 deletions(-) diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index ee3600bb114..b1d86b36c0c 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -17,15 +17,14 @@ //! Mocks for all the traits. use crate::{ - assigner, assigner_bulk, assigner_on_demand, assigner_parachains, configuration, disputes, dmp, - hrmp, + assigner::v1 as assigner_v1, + assigner_bulk, assigner_on_demand, assigner_parachains, configuration, disputes, dmp, hrmp, inclusion::{self, AggregateMessageOrigin, UmpQueueId}, initializer, origin, paras, paras::ParaKind, paras_inherent, scheduler, scheduler::common::{ - AssignmentProvider, AssignmentProviderConfig, AssignmentVersion, FixedAssignmentProvider, - V0Assignment, + AssignmentProvider, AssignmentProviderConfig, FixedAssignmentProvider, V0Assignment, }, session_info, shared, ParaId, }; @@ -72,7 +71,7 @@ frame_support::construct_runtime!( ParaInherent: paras_inherent, Scheduler: scheduler, MockAssigner: mock_assigner, - Assigner: assigner, + Assigner: assigner_v1, ParachainsAssigner: assigner_parachains, OnDemandAssigner: assigner_on_demand, BulkAssigner: assigner_bulk, @@ -356,7 +355,7 @@ parameter_types! { pub const OnDemandTrafficDefaultValue: FixedU128 = FixedU128::from_u32(1); } -impl assigner::Config for Test {} +impl assigner_v1::Config for Test {} impl assigner_parachains::Config for Test {} @@ -367,9 +366,7 @@ impl assigner_on_demand::Config for Test { type WeightInfo = crate::assigner_on_demand::TestWeightInfo; } -impl assigner_bulk::Config for Test { - type WeightInfo = crate::assigner_bulk::TestWeightInfo; -} +impl assigner_bulk::Config for Test {} impl crate::inclusion::Config for Test { type WeightInfo = (); @@ -453,17 +450,7 @@ pub mod mock_assigner { } impl AssignmentProvider for Pallet { - // Simplest assignment used for testing type AssignmentType = V0Assignment; - type OldAssignmentType = V0Assignment; - const ASSIGNMENT_STORAGE_VERSION: AssignmentVersion = AssignmentVersion::new(0); - - fn migrate_old_to_current( - old: Self::OldAssignmentType, - _core: CoreIndex, - ) -> Self::AssignmentType { - old - } // With regards to popping_assignments, the scheduler just needs to be tested under // the following two conditions: -- GitLab From 6ebff6955ff382bc43697d1ee8f3072ded1831fd Mon Sep 17 00:00:00 2001 From: eskimor Date: Sat, 18 Nov 2023 11:07:28 +0100 Subject: [PATCH 66/74] Add missing weights file. 
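The file below is auto-generated benchmark output. It is typically consumed by pointing the on-demand assigner pallet's `WeightInfo` associated type at it, roughly as in the following sketch; the module path, the `Runtime` type, and the non-`WeightInfo` associated types are assumptions for illustration, not taken from this patch:

```rust
// Illustrative wiring only; not part of this patch. The exact item set of
// `assigner_on_demand::Config` in the Westend runtime is an assumption here.
impl runtime_parachains::assigner_on_demand::Config for Runtime {
    type RuntimeEvent = RuntimeEvent;
    type Currency = Balances;
    type TrafficDefaultValue = OnDemandTrafficDefaultValue;
    // The file added by this commit provides this implementation:
    type WeightInfo = weights::runtime_parachains_assigner_on_demand::WeightInfo<Runtime>;
}
```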
--- .../runtime_parachains_assigner_on_demand.rs | 91 +++++++++++++++++++ 1 file changed, 91 insertions(+) create mode 100644 polkadot/runtime/westend/src/weights/runtime_parachains_assigner_on_demand.rs diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_assigner_on_demand.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_assigner_on_demand.rs new file mode 100644 index 00000000000..ac0f05301b4 --- /dev/null +++ b/polkadot/runtime/westend/src/weights/runtime_parachains_assigner_on_demand.rs @@ -0,0 +1,91 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `runtime_parachains::assigner_on_demand` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-fljshgub-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --pallet=runtime_parachains::assigner_on_demand +// --chain=rococo-dev +// --header=./file_header.txt +// --output=./runtime/rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `runtime_parachains::assigner_on_demand`. +pub struct WeightInfo(PhantomData); +impl runtime_parachains::assigner_on_demand::WeightInfo for WeightInfo { + /// Storage: `OnDemandAssignmentProvider::SpotTraffic` (r:1 w:0) + /// Proof: `OnDemandAssignmentProvider::SpotTraffic` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::OnDemandQueue` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::OnDemandQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// The range of component `s` is `[1, 9999]`. + fn place_order_keep_alive(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `297 + s * (4 ±0)` + // Estimated: `3762 + s * (4 ±0)` + // Minimum execution time: 33_522_000 picoseconds. 
+ Weight::from_parts(35_436_835, 0) + .saturating_add(Weight::from_parts(0, 3762)) + // Standard Error: 129 + .saturating_add(Weight::from_parts(14_041, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) + } + /// Storage: `OnDemandAssignmentProvider::SpotTraffic` (r:1 w:0) + /// Proof: `OnDemandAssignmentProvider::SpotTraffic` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::OnDemandQueue` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::OnDemandQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// The range of component `s` is `[1, 9999]`. + fn place_order_allow_death(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `297 + s * (4 ±0)` + // Estimated: `3762 + s * (4 ±0)` + // Minimum execution time: 33_488_000 picoseconds. + Weight::from_parts(34_848_934, 0) + .saturating_add(Weight::from_parts(0, 3762)) + // Standard Error: 143 + .saturating_add(Weight::from_parts(14_215, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) + } +} -- GitLab From 794ee98049f546d39923b7f34ffcb7e719ae349d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 18 Nov 2023 13:58:16 +0200 Subject: [PATCH 67/74] Bump secp256k1 from 0.24.3 to 0.28.0 (#2357) Bumps [secp256k1](https://github.com/rust-bitcoin/rust-secp256k1) from 0.24.3 to 0.28.0.
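For this repository the bump boils down to two call-site changes, both visible in the diff below: `Message::from_slice` becomes `Message::from_digest_slice`, and the manual volatile-write zeroization of the secret key is replaced by `SecretKey::non_secure_erase`. A minimal sketch of the new calls (the function names and standalone context here are illustrative, not taken from the patch):

```rust
use secp256k1::{ecdsa::RecoverableSignature, Message, Secp256k1, SecretKey};

// Sign a pre-hashed 32-byte message; `from_digest_slice` replaces the old
// `from_slice` constructor used with secp256k1 0.24.
fn sign_prehashed(digest: &[u8; 32], secret: &SecretKey) -> RecoverableSignature {
    let msg = Message::from_digest_slice(digest).expect("digest is 32 bytes; qed");
    Secp256k1::new().sign_ecdsa_recoverable(&msg, secret)
}

// Best-effort wipe of key material; replaces the manual
// `core::ptr::write_volatile` loop needed with 0.24.
fn erase(secret: &mut SecretKey) {
    secret.non_secure_erase();
}
```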
--------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Liam Aharon --- Cargo.lock | 8 ++++---- substrate/primitives/core/Cargo.toml | 2 +- substrate/primitives/core/src/ecdsa.rs | 13 ++++--------- substrate/primitives/io/Cargo.toml | 2 +- substrate/primitives/io/src/lib.rs | 4 ++-- 5 files changed, 12 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ed9997101fb..ac64e65ee0e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -16602,18 +16602,18 @@ dependencies = [ [[package]] name = "secp256k1" -version = "0.24.3" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b1629c9c557ef9b293568b338dddfc8208c98a18c59d722a9d53f859d9c9b62" +checksum = "2acea373acb8c21ecb5a23741452acd2593ed44ee3d343e72baaa143bc89d0d5" dependencies = [ "secp256k1-sys", ] [[package]] name = "secp256k1-sys" -version = "0.6.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83080e2c2fc1006e625be82e5d1eb6a43b7fd9578b617fcc55814daf286bba4b" +checksum = "09e67c467c38fd24bd5499dc9a18183b31575c12ee549197e3e20d57aa4fe3b7" dependencies = [ "cc", ] diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml index 25478bed2d9..34485c72ab0 100644 --- a/substrate/primitives/core/Cargo.toml +++ b/substrate/primitives/core/Cargo.toml @@ -49,7 +49,7 @@ blake2 = { version = "0.10.4", default-features = false, optional = true } libsecp256k1 = { version = "0.7", default-features = false, features = ["static-context"], optional = true } schnorrkel = { version = "0.9.1", features = ["preaudit_deprecated", "u64_backend"], default-features = false } merlin = { version = "2.0", default-features = false } -secp256k1 = { version = "0.24.0", default-features = false, features = ["recovery", "alloc"], optional = true } +secp256k1 = { version = "0.28.0", default-features = false, features = ["recovery", "alloc"], optional = true } sp-core-hashing = { path = "hashing", default-features = false, optional = true } sp-runtime-interface = { path = "../runtime-interface", default-features = false} diff --git a/substrate/primitives/core/src/ecdsa.rs b/substrate/primitives/core/src/ecdsa.rs index 603fa515a30..471714582a6 100644 --- a/substrate/primitives/core/src/ecdsa.rs +++ b/substrate/primitives/core/src/ecdsa.rs @@ -336,7 +336,7 @@ impl Signature { pub fn recover_prehashed(&self, message: &[u8; 32]) -> Option { let rid = RecoveryId::from_i32(self.0[64] as i32).ok()?; let sig = RecoverableSignature::from_compact(&self.0[..64], rid).ok()?; - let message = Message::from_slice(message).expect("Message is 32 bytes; qed"); + let message = Message::from_digest_slice(message).expect("Message is 32 bytes; qed"); #[cfg(feature = "std")] let context = SECP256K1; @@ -458,7 +458,7 @@ impl Pair { /// Sign a pre-hashed message pub fn sign_prehashed(&self, message: &[u8; 32]) -> Signature { - let message = Message::from_slice(message).expect("Message is 32 bytes; qed"); + let message = Message::from_digest_slice(message).expect("Message is 32 bytes; qed"); #[cfg(feature = "std")] let context = SECP256K1; @@ -508,12 +508,7 @@ impl Pair { #[cfg(feature = "full_crypto")] impl Drop for Pair { fn drop(&mut self) { - let ptr = self.secret.as_mut_ptr(); - for off in 0..self.secret.len() { - unsafe { - core::ptr::write_volatile(ptr.add(off), 0); - } - } + self.secret.non_secure_erase() } } @@ -760,7 +755,7 @@ mod test { let msg = [0u8; 32]; let sig1 = 
pair.sign_prehashed(&msg); let sig2: Signature = { - let message = Message::from_slice(&msg).unwrap(); + let message = Message::from_digest_slice(&msg).unwrap(); SECP256K1.sign_ecdsa_recoverable(&message, &pair.secret).into() }; assert_eq!(sig1, sig2); diff --git a/substrate/primitives/io/Cargo.toml b/substrate/primitives/io/Cargo.toml index 445104b736e..59df8895bb7 100644 --- a/substrate/primitives/io/Cargo.toml +++ b/substrate/primitives/io/Cargo.toml @@ -28,7 +28,7 @@ sp-trie = { path = "../trie", default-features = false, optional = true} sp-externalities = { path = "../externalities", default-features = false} sp-tracing = { path = "../tracing", default-features = false} log = { version = "0.4.17", optional = true } -secp256k1 = { version = "0.24.0", features = ["recovery", "global-context"], optional = true } +secp256k1 = { version = "0.28.0", features = ["recovery", "global-context"], optional = true } tracing = { version = "0.1.29", default-features = false } tracing-core = { version = "0.1.28", default-features = false} diff --git a/substrate/primitives/io/src/lib.rs b/substrate/primitives/io/src/lib.rs index c4182d6ab3a..a300152ee66 100644 --- a/substrate/primitives/io/src/lib.rs +++ b/substrate/primitives/io/src/lib.rs @@ -1139,7 +1139,7 @@ pub trait Crypto { .map_err(|_| EcdsaVerifyError::BadV)?; let sig = RecoverableSignature::from_compact(&sig[..64], rid) .map_err(|_| EcdsaVerifyError::BadRS)?; - let msg = Message::from_slice(msg).expect("Message is 32 bytes; qed"); + let msg = Message::from_digest_slice(msg).expect("Message is 32 bytes; qed"); let pubkey = SECP256K1 .recover_ecdsa(&msg, &sig) .map_err(|_| EcdsaVerifyError::BadSignature)?; @@ -1185,7 +1185,7 @@ pub trait Crypto { .map_err(|_| EcdsaVerifyError::BadV)?; let sig = RecoverableSignature::from_compact(&sig[..64], rid) .map_err(|_| EcdsaVerifyError::BadRS)?; - let msg = Message::from_slice(msg).expect("Message is 32 bytes; qed"); + let msg = Message::from_digest_slice(msg).expect("Message is 32 bytes; qed"); let pubkey = SECP256K1 .recover_ecdsa(&msg, &sig) .map_err(|_| EcdsaVerifyError::BadSignature)?; -- GitLab From 28c0390284cff9c01d5355289c645ed97a52f070 Mon Sep 17 00:00:00 2001 From: eskimor Date: Sat, 18 Nov 2023 15:13:25 +0100 Subject: [PATCH 68/74] Fix migration checks. --- .../parachains/src/scheduler/migration.rs | 21 ++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/polkadot/runtime/parachains/src/scheduler/migration.rs b/polkadot/runtime/parachains/src/scheduler/migration.rs index be1025bec79..aef5c3ef700 100644 --- a/polkadot/runtime/parachains/src/scheduler/migration.rs +++ b/polkadot/runtime/parachains/src/scheduler/migration.rs @@ -118,7 +118,9 @@ pub mod assignment_version { #[cfg(feature = "try-runtime")] fn post_upgrade(state: Vec) -> Result<(), sp_runtime::DispatchError> { // Did migration take place? - if state.decode()? == M::ON_CHAIN_STORAGE_VERSION { + if ::decode(&mut &state[..]).unwrap() == + M::ON_CHAIN_STORAGE_VERSION + { ensure!( AssignmentVersion::get::>() == M::STORAGE_VERSION, "Assignment version should should match current version after the migration." 
@@ -335,11 +337,11 @@ pub mod v1 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, sp_runtime::DispatchError> { - let n: u32 = v1::ClaimQueue::::get().len() as u32 + - v1::AvailabilityCores::::get().iter().filter(|c| c.is_some()).count() as u32; + let n: u32 = v0::Scheduled::::get().len() as u32 + + v0::AvailabilityCores::::get().iter().filter(|c| c.is_some()).count() as u32; log::info!( - target: scheduler::LOG_TARGET, + target: crate::scheduler::LOG_TARGET, "Number of scheduled and waiting for availability before: {n}", ); @@ -350,10 +352,15 @@ pub mod v1 { fn post_upgrade(state: Vec) -> Result<(), sp_runtime::DispatchError> { log::info!(target: crate::scheduler::LOG_TARGET, "Running post_upgrade()"); + ensure!( + v0::Scheduled::::get().is_empty(), + "Scheduled should be empty after the migration" + ); + let expected_len = u32::decode(&mut &state[..]).unwrap(); - let availability_cores_waiting = super::AvailabilityCores::::get() - .iter() - .filter(|c| !matches!(c, CoreOccupied::Free)) + let availability_cores_waiting = v1::AvailabilityCores::::get() + .into_iter() + .filter(|c| !matches!(c, v1::CoreOccupied::Free)) .count(); ensure!( -- GitLab From 64191fc7a1257862aea0b982ad4d51408a5c79f2 Mon Sep 17 00:00:00 2001 From: eskimor Date: Sat, 18 Nov 2023 21:50:00 +0100 Subject: [PATCH 69/74] Clippy. --- polkadot/runtime/parachains/src/mock.rs | 5 +---- polkadot/runtime/parachains/src/scheduler/migration.rs | 2 +- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index b1d86b36c0c..c2b60209383 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -489,10 +489,7 @@ pub mod mock_assigner { // 5, if no explicit count was set. impl FixedAssignmentProvider for Pallet { fn session_core_count() -> u32 { - match MockCoreCount::::get() { - Some(count) => count, - None => 5, - } + MockCoreCount::::get().unwrap_or(5) } } } diff --git a/polkadot/runtime/parachains/src/scheduler/migration.rs b/polkadot/runtime/parachains/src/scheduler/migration.rs index aef5c3ef700..6af5afa00b9 100644 --- a/polkadot/runtime/parachains/src/scheduler/migration.rs +++ b/polkadot/runtime/parachains/src/scheduler/migration.rs @@ -446,7 +446,7 @@ pub fn migrate_to_v2() -> Weight { let old = v1::ClaimQueue::::take(); let new = old .into_iter() - .map(|(k, v)| (k, v.into_iter().filter_map(identity).collect::>())) + .map(|(k, v)| (k, v.into_iter().flatten().collect::>())) .collect::>>>>(); v2::ClaimQueue::::put(new); -- GitLab From 5d03336349c0dd8f06f4d0474c4a8a658ed803be Mon Sep 17 00:00:00 2001 From: eskimor Date: Sat, 18 Nov 2023 21:56:34 +0100 Subject: [PATCH 70/74] Benchmark fixes. 
--- .../parachains/src/assigner_on_demand/benchmarking.rs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs b/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs index 42ca94d5185..04ab0a03361 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs @@ -70,11 +70,10 @@ mod benchmarks { let para_id = ParaId::from(111u32); init_parathread::(para_id); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let assignment = Assignment::new(para_id); + let order = EnqueuedOrder::new(para_id); for _ in 0..s { - Pallet::::add_on_demand_assignment(assignment.clone(), QueuePushDirection::Back) - .unwrap(); + Pallet::::add_on_demand_order(order.clone(), QueuePushDirection::Back).unwrap(); } #[extrinsic_call] @@ -88,11 +87,10 @@ mod benchmarks { let para_id = ParaId::from(111u32); init_parathread::(para_id); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let assignment = Assignment::new(para_id); + let order = EnqueuedOrder::new(para_id); for _ in 0..s { - Pallet::::add_on_demand_assignment(assignment.clone(), QueuePushDirection::Back) - .unwrap(); + Pallet::::add_on_demand_order(order.clone(), QueuePushDirection::Back).unwrap(); } #[extrinsic_call] -- GitLab From 52589c4ff61feade968be4d9be0e55adb2697f2e Mon Sep 17 00:00:00 2001 From: eskimor Date: Sun, 19 Nov 2023 13:53:13 +0100 Subject: [PATCH 71/74] Make benchmarking code compile. --- .../runtime/parachains/src/assigner/v1.rs | 18 ++++++++++++++++ .../parachains/src/assigner_bulk/mod.rs | 7 +++++++ .../parachains/src/assigner_on_demand/mod.rs | 19 +++++++++++++++++ .../parachains/src/assigner_parachains.rs | 5 +++++ polkadot/runtime/parachains/src/builder.rs | 21 ++++++------------- polkadot/runtime/parachains/src/mock.rs | 4 ++++ .../parachains/src/scheduler/common.rs | 7 +++++++ .../parachains/src/scheduler/migration.rs | 2 -- 8 files changed, 66 insertions(+), 17 deletions(-) diff --git a/polkadot/runtime/parachains/src/assigner/v1.rs b/polkadot/runtime/parachains/src/assigner/v1.rs index b6a74451052..212d9d39004 100644 --- a/polkadot/runtime/parachains/src/assigner/v1.rs +++ b/polkadot/runtime/parachains/src/assigner/v1.rs @@ -151,6 +151,24 @@ impl AssignmentProvider> for Pallet { ) } } + #[cfg(any(feature = "runtime-benchmarks", test))] + fn get_mock_assignment(core_idx: CoreIndex, para_id: ParaId) -> Self::AssignmentType { + let legacy_cores = as FixedAssignmentProvider< + BlockNumberFor, + >>::session_core_count(); + + if core_idx.0 < legacy_cores { + UnifiedAssignment::LegacyAuction( as AssignmentProvider< + BlockNumberFor, + >>::get_mock_assignment(core_idx, para_id)) + } else { + let core_idx = CoreIndex(core_idx.0 - legacy_cores); + + UnifiedAssignment::Bulk( as AssignmentProvider< + BlockNumberFor, + >>::get_mock_assignment(core_idx, para_id)) + } + } } impl FixedAssignmentProvider> for Pallet { diff --git a/polkadot/runtime/parachains/src/assigner_bulk/mod.rs b/polkadot/runtime/parachains/src/assigner_bulk/mod.rs index 7b21614de11..548c4e9b1d4 100644 --- a/polkadot/runtime/parachains/src/assigner_bulk/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_bulk/mod.rs @@ -339,6 +339,13 @@ impl AssignmentProvider> for Pallet { ttl: config.on_demand_ttl, } } + + #[cfg(any(feature = "runtime-benchmarks", test))] + fn get_mock_assignment(_: CoreIndex, para_id: ParaId) -> Self::AssignmentType { + // 
Given that we are not tracking anything in `Bulk` assignments, it is safe to always + // return a bulk assignment. + return BulkAssignment::Bulk(para_id) + } } impl FixedAssignmentProvider> for Pallet { diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs index cd99681a380..af0358daa39 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs @@ -644,4 +644,23 @@ impl AssignmentProvider> for Pallet { ttl: config.on_demand_ttl, } } + + #[cfg(any(feature = "runtime-benchmarks", test))] + fn get_mock_assignment(core_idx: CoreIndex, para_id: ParaId) -> Self::AssignmentType { + Self::add_on_demand_order(EnqueuedOrder { para_id }, QueuePushDirection::Front).unwrap(); + let assignment = Self::pop_assignment_for_core(core_idx).unwrap(); + debug_assert_eq!( + assignment.para_id(), + para_id, + "Served para id does not match requested one in `get_mock_assignment`. + This can happen if on-demand assigner already served assignments, due to core affinity. + Possible fixes: + 1. Don't use on-demand for your mocking/benchmarks. + 2. Pick a different `ParaId`, one that you know has not been served already. + 3. Pick the same core index you just got served an assignment for that `ParaId` for. + 4. Implement this function differently ;-) + " + ); + assignment + } } diff --git a/polkadot/runtime/parachains/src/assigner_parachains.rs b/polkadot/runtime/parachains/src/assigner_parachains.rs index 4f2fdd47b05..ae9ce8b38f2 100644 --- a/polkadot/runtime/parachains/src/assigner_parachains.rs +++ b/polkadot/runtime/parachains/src/assigner_parachains.rs @@ -95,6 +95,11 @@ impl AssignmentProvider> for Pallet { ttl: 10u32.into(), } } + + #[cfg(any(feature = "runtime-benchmarks", test))] + fn get_mock_assignment(_: CoreIndex, para_id: ParaId) -> Self::AssignmentType { + ParachainsAssignment { para_id } + } } impl FixedAssignmentProvider> for Pallet { diff --git a/polkadot/runtime/parachains/src/builder.rs b/polkadot/runtime/parachains/src/builder.rs index a05849374e4..c753941f784 100644 --- a/polkadot/runtime/parachains/src/builder.rs +++ b/polkadot/runtime/parachains/src/builder.rs @@ -15,14 +15,12 @@ // along with Polkadot. If not, see . use crate::{ - configuration, inclusion, initializer, - mock::MockAssigner, - paras, + configuration, inclusion, initializer, paras, paras::ParaKind, paras_inherent, scheduler::{ self, - common::{AssignmentProvider, AssignmentProviderConfig, V0Assignment}, + common::{AssignmentProvider, AssignmentProviderConfig}, CoreOccupied, ParasEntry, }, session_info, shared, @@ -666,7 +664,6 @@ impl BenchBuilder { // We don't allow a core to have both disputes and be marked fully available at this block. let max_cores = self.max_cores(); - MockAssigner::set_core_count(max_cores); let used_cores = (self.dispute_sessions.len() + self.backed_and_concluding_cores.len()) as u32; assert!(used_cores <= max_cores); @@ -706,21 +703,15 @@ impl BenchBuilder { let AssignmentProviderConfig { ttl, .. 
} = scheduler::Pallet::::assignment_provider_config(CoreIndex(i)); // Load an assignment into provider so that one is present to pop - MockAssigner::add_test_assignment(V0Assignment::new(i.into())); - let assignment = - T::AssignmentProvider::pop_assignment_for_core(CoreIndex(i)).unwrap(); + let assignment = ::AssignmentProvider::get_mock_assignment( + CoreIndex(i), + ParaId::from(i), + ); CoreOccupied::Paras(ParasEntry::new(assignment, now + ttl)) }) .collect(); scheduler::AvailabilityCores::::set(cores); - // Add assignments to the MockAssigner for each core. This facilitates legacy tests - // assuming the use of a lease holding parachain assigner. - for core_index in 0..max_cores { - // Core index == para_id in this case - MockAssigner::add_test_assignment(V0Assignment::new(core_index.into())); - } - Bench:: { data: ParachainsInherentData { bitfields, diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index c2b60209383..6c410604f32 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -483,6 +483,10 @@ pub mod mock_assigner { }, } } + #[cfg(any(feature = "runtime-benchmarks", test))] + fn get_mock_assignment(_: CoreIndex, para_id: ParaId) -> Self::AssignmentType { + V0Assignment { para_id } + } } // Provides a core count for scheduler tests defaulting to the most common number, diff --git a/polkadot/runtime/parachains/src/scheduler/common.rs b/polkadot/runtime/parachains/src/scheduler/common.rs index 832c6cd2f8f..890ce410436 100644 --- a/polkadot/runtime/parachains/src/scheduler/common.rs +++ b/polkadot/runtime/parachains/src/scheduler/common.rs @@ -170,6 +170,13 @@ pub trait AssignmentProvider { /// Returns a set of variables needed by the scheduler fn get_provider_config(core_idx: CoreIndex) -> AssignmentProviderConfig; + + /// Push some assignment for mocking/benchmarks purposes. + /// + /// Useful for benchmarks and testing. The returned assignment is "valid" and can if need be + /// passed into `report_processed` for example. + #[cfg(any(feature = "runtime-benchmarks", test))] + fn get_mock_assignment(core_idx: CoreIndex, para_id: ParaId) -> Self::AssignmentType; } /// An `AssignmentProvider` with a determined set of cores. diff --git a/polkadot/runtime/parachains/src/scheduler/migration.rs b/polkadot/runtime/parachains/src/scheduler/migration.rs index 6af5afa00b9..7334180170a 100644 --- a/polkadot/runtime/parachains/src/scheduler/migration.rs +++ b/polkadot/runtime/parachains/src/scheduler/migration.rs @@ -22,8 +22,6 @@ use frame_support::{ traits::OnRuntimeUpgrade, weights::Weight, }; -use sp_std::convert::identity; - /// Migration for potential changes in `Assignment` representation. pub mod assignment_version { use super::*; -- GitLab From 6f8dff156eaf31d10fec98130c4c23f33c7b0b59 Mon Sep 17 00:00:00 2001 From: eskimor Date: Sun, 19 Nov 2023 14:20:09 +0100 Subject: [PATCH 72/74] Fix doc --- polkadot/runtime/parachains/src/scheduler/common.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/polkadot/runtime/parachains/src/scheduler/common.rs b/polkadot/runtime/parachains/src/scheduler/common.rs index 890ce410436..304da1027f5 100644 --- a/polkadot/runtime/parachains/src/scheduler/common.rs +++ b/polkadot/runtime/parachains/src/scheduler/common.rs @@ -67,7 +67,7 @@ pub struct AssignmentVersion(u16); /// The storage key postfix that is used to store the [`AssignmentVersion`] per pallet. 
/// /// The full storage key is built by using: -/// Twox128([`PalletInfo::name`]) ++ Twox128([`ASSIGNMENT_VERSION_STORAGE_KEY_POSTFIX`]) +/// Twox128(`PalletInfo::name`) ++ Twox128([`ASSIGNMENT_VERSION_STORAGE_KEY_POSTFIX`]) pub const ASSIGNMENT_VERSION_STORAGE_KEY_POSTFIX: &[u8] = b":__ASSIGNMENT_VERSION__:"; impl AssignmentVersion { @@ -91,7 +91,7 @@ impl AssignmentVersion { /// /// This function will panic iff `Pallet` can not be found by `PalletInfo`. /// In a runtime that is put together using - /// [`construct_runtime!`](crate::construct_runtime) this should never happen. + /// `construct_runtime!` this should never happen. /// /// It will also panic if this function isn't executed in an externalities /// provided environment. @@ -109,7 +109,7 @@ impl AssignmentVersion { /// /// This function will panic iff `Pallet` can not be found by `PalletInfo`. /// In a runtime that is put together using - /// [`construct_runtime!`](crate::construct_runtime) this should never happen. + /// `construct_runtime!` this should never happen. /// /// It will also panic if this function isn't executed in an externalities /// provided environment. -- GitLab From b59c2fe6b3a5c14aa766ec0aaae1c8ad42091dfe Mon Sep 17 00:00:00 2001 From: eskimor Date: Sun, 19 Nov 2023 14:31:07 +0100 Subject: [PATCH 73/74] Migration fix. --- polkadot/runtime/parachains/src/scheduler/migration.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/runtime/parachains/src/scheduler/migration.rs b/polkadot/runtime/parachains/src/scheduler/migration.rs index 7334180170a..79c7789fa12 100644 --- a/polkadot/runtime/parachains/src/scheduler/migration.rs +++ b/polkadot/runtime/parachains/src/scheduler/migration.rs @@ -428,7 +428,7 @@ pub mod v2 { let old_len = u32::from_be_bytes(state.try_into().unwrap()); ensure!( - Pallet::::claimqueue_len() as u32 == old_len, + v2::ClaimQueue::::get().len() as u32 == old_len, "Old ClaimQueue completely moved to new ClaimQueue after migration" ); -- GitLab From 8c1b9a5074d08d564a1fb12230e7ccae50dfd898 Mon Sep 17 00:00:00 2001 From: eskimor Date: Mon, 20 Nov 2023 10:17:05 +0100 Subject: [PATCH 74/74] More test fixes. 
--- polkadot/runtime/parachains/src/builder.rs | 2 +- .../runtime/parachains/src/paras_inherent/tests.rs | 12 +++++++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/polkadot/runtime/parachains/src/builder.rs b/polkadot/runtime/parachains/src/builder.rs index c753941f784..201bcb38b08 100644 --- a/polkadot/runtime/parachains/src/builder.rs +++ b/polkadot/runtime/parachains/src/builder.rs @@ -643,7 +643,7 @@ impl BenchBuilder { }) .collect(); - DisputeStatementSet { candidate_hash: candidate_hash, session, statements } + DisputeStatementSet { candidate_hash, session, statements } }) .collect() } diff --git a/polkadot/runtime/parachains/src/paras_inherent/tests.rs b/polkadot/runtime/parachains/src/paras_inherent/tests.rs index 5b430c9fceb..9396a0359af 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/tests.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/tests.rs @@ -25,7 +25,8 @@ mod enter { use super::*; use crate::{ builder::{Bench, BenchBuilder}, - mock::{new_test_ext, BlockLength, BlockWeights, MockGenesisConfig, Test}, + mock::{mock_assigner, new_test_ext, BlockLength, BlockWeights, MockGenesisConfig, Test}, + scheduler::common::V0Assignment, }; use assert_matches::assert_matches; use frame_support::assert_ok; @@ -60,6 +61,15 @@ mod enter { .set_backed_and_concluding_cores(backed_and_concluding) .set_dispute_sessions(&dispute_sessions[..]); + // Setup some assignments as needed: + mock_assigner::Pallet::::set_core_count(builder.max_cores()); + for core_index in 0..builder.max_cores() { + // Core index == para_id in this case + mock_assigner::Pallet::::add_test_assignment(V0Assignment::new( + core_index.into(), + )); + } + if let Some(code_size) = code_upgrade { builder.set_code_upgrade(code_size).build() } else { -- GitLab